source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
lsgn_data.py | import tensorflow as tf
import tensorflow_hub as hub
import h5py
import json
import numpy as np
import random
import threading
from input_utils import *
import util
import srl_eval_utils
# Names for the "given" tensors.
_input_names = [
"tokens", "context_word_emb", "head_word_emb", "lm_emb", "char_idx", "text_len",
"doc_id", "is_training",
"gold_predicates", "num_gold_predicates",
]
# Names for the "gold" tensors.
_label_names = [
"predicates", "arg_starts", "arg_ends", "arg_labels", "srl_len",
]
# Name for predicted tensors.
_predict_names = [
"candidate_starts", "candidate_ends", "candidate_arg_scores", "candidate_pred_scores",
"arg_starts", "arg_ends", "predicates", "num_args", "num_preds", "arg_labels", "srl_scores",
"arg_scores", "pred_scores", "head_scores"
]
class LSGNData(object):
  """Input pipeline for an LSGN-style SRL model (TF1 queue-based).

  Loads word/char embedding tables and (optionally) contextualized LM
  embeddings, converts JSON documents into per-sentence tensor dicts, and
  feeds training batches to the graph through a PaddingFIFOQueue that is
  filled by a background daemon thread.
  """

  def __init__(self, config):
    self.config = config
    # Two embedding tables: one for the context encoder, one for head words.
    # The head table may reuse vectors cached by the context table.
    self.context_embeddings = util.EmbeddingDictionary(config["context_embeddings"])
    self.head_embeddings = util.EmbeddingDictionary(config["head_embeddings"],
                                                    maybe_cache=self.context_embeddings)
    self.char_embedding_size = config["char_embedding_size"]
    self.char_dict = util.load_char_dict(config["char_vocab_path"])
    # LM embeddings come either from a TF-Hub module (computed on the fly)
    # or from a precomputed h5 cache file; otherwise they are disabled.
    self.lm_file = None
    self.lm_hub = None
    self.lm_layers = 0  # TODO: Remove these.
    self.lm_size = 0
    if config["lm_path"]:
      if "tfhub" in config["lm_path"]:
        print ("Using tensorflow hub:", config["lm_path"])
        self.lm_hub = hub.Module(config["lm_path"].encode("utf-8"), trainable=False)
      else:
        self.lm_file = h5py.File(self.config["lm_path"], "r")
        self.lm_layers = self.config["lm_layers"]
        self.lm_size = self.config["lm_size"]
    # SRL label inventory. Index 0 is reserved for the null label "".
    self.adjunct_roles, self.core_roles = split_srl_labels(
        config["srl_labels"], config["include_c_v"])
    self.srl_labels_inv = [""] + self.adjunct_roles + self.core_roles
    self.srl_labels = { l:i for i,l in enumerate(self.srl_labels_inv) }
    # IO Stuff.
    # Need to make sure they are in the same order as input_names + label_names.
    self.input_props = [
        (tf.string, [None]),  # String tokens.
        (tf.float32, [None, self.context_embeddings.size]),  # Context embeddings.
        (tf.float32, [None, self.head_embeddings.size]),  # Head embeddings.
        (tf.float32, [None, self.lm_size, self.lm_layers]),  # LM embeddings.
        (tf.int32, [None, None]),  # Character indices.
        (tf.int32, []),  # Text length.
        (tf.int32, []),  # Document ID.
        (tf.bool, []),  # Is training.
        (tf.int32, [None]),  # Gold predicate ids (for input).
        (tf.int32, []),  # Num gold predicates (for input).
        (tf.int32, [None]),  # Predicate ids (length=num_srl_relations).
        (tf.int32, [None]),  # Argument starts.
        (tf.int32, [None]),  # Argument ends.
        (tf.int32, [None]),  # SRL labels.
        (tf.int32, [])  # Number of SRL relations.
    ]
    self.input_names = _input_names
    self.label_names = _label_names
    self.predict_names = _predict_names
    self.batch_size = self.config["batch_size"]
    dtypes, shapes = zip(*self.input_props)
    if self.batch_size > 0 and self.config["max_tokens_per_batch"] < 0:
      # Use fixed batch size if number of words per batch is not limited (-1).
      # Each enqueued element is one sentence; dequeue_many pads out a batch.
      self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in self.input_props]
      queue = tf.PaddingFIFOQueue(capacity=self.batch_size * 2, dtypes=dtypes, shapes=shapes)
      self.enqueue_op = queue.enqueue(self.queue_input_tensors)
      self.input_tensors = queue.dequeue_many(self.batch_size)
    else:
      # Use dynamic batch size: each enqueued element is already a padded
      # batch (extra leading None dimension) and is dequeued whole.
      new_shapes = [[None] + shape for shape in shapes]
      self.queue_input_tensors = [tf.placeholder(dtype, shape) for dtype, shape in zip(dtypes, new_shapes)]
      queue = tf.PaddingFIFOQueue(capacity=2, dtypes=dtypes, shapes=new_shapes)
      self.enqueue_op = queue.enqueue(self.queue_input_tensors)
      self.input_tensors = queue.dequeue()
    # Split the dequeued tensors back into model inputs and gold labels.
    num_features = len(self.input_names)
    self.input_dict = dict(zip(self.input_names, self.input_tensors[:num_features]))
    self.labels_dict = dict(zip(self.label_names, self.input_tensors[num_features:]))

  def start_enqueue_thread(self, session):
    """Read training data and feed it to the input queue on a daemon thread.

    Loops forever over the (reshuffled) training set. Supports three
    batching modes: whole-document batches (batch_size == -1), adaptive
    token-capped batches, and one-sentence-at-a-time enqueueing.
    """
    with open(self.config["train_path"], "r") as f:
      train_examples = [json.loads(jsonline) for jsonline in f.readlines()]
    populate_sentence_offset(train_examples)
    def _enqueue_loop():
      adaptive_batching = (self.config["max_tokens_per_batch"] > 0)
      while True:
        random.shuffle(train_examples)
        doc_examples = []  # List of list of examples.
        cluster_id_offset = 0
        num_sentences = 0
        num_mentions = 0
        for doc_id, example in enumerate(train_examples):
          doc_examples.append([])
          for e in self.split_document_example(example):
            # Doc ids are 1-based.
            e["doc_id"] = doc_id + 1
            e["cluster_id_offset"] = cluster_id_offset
            doc_examples[-1].append(e)
            #num_mentions += len(e["coref"])
          #cluster_id_offset += len(example["clusters"])
          num_sentences += len(doc_examples[-1])
        print ("Load {} training documents with {} sentences".format(doc_id, num_sentences))
        tensor_names = self.input_names + self.label_names
        batch_buffer = []
        num_tokens_in_batch = 0
        for examples in doc_examples:
          tensor_examples = [self.tensorize_example(e, is_training=True) for e in examples]
          if self.config["batch_size"] == -1:
            # Whole-document batches; random truncation of long documents.
            num_sents = len(tensor_examples)
            max_training_sents = self.config["max_training_sentences"]
            if num_sents > max_training_sents:
              sentence_offset = random.randint(0, num_sents - max_training_sents)
              tensor_examples = tensor_examples[sentence_offset:sentence_offset + max_training_sents]
            batched_tensor_examples = [pad_batch_tensors(tensor_examples, tn) for tn in tensor_names]
            feed_dict = dict(zip(self.queue_input_tensors, batched_tensor_examples))
            session.run(self.enqueue_op, feed_dict=feed_dict)
          elif adaptive_batching:
            # Accumulate sentences until either the sentence-count cap or
            # the token cap would be exceeded, then flush the buffer.
            for tensor_example in tensor_examples:
              num_tokens = tensor_example["text_len"]
              if len(batch_buffer) >= self.config["batch_size"] or (
                  num_tokens_in_batch + num_tokens > self.config["max_tokens_per_batch"]):
                batched_tensor_examples = [pad_batch_tensors(batch_buffer, tn) for tn in tensor_names]
                feed_dict = dict(zip(self.queue_input_tensors, batched_tensor_examples))
                session.run(self.enqueue_op, feed_dict=feed_dict)
                batch_buffer = []
                num_tokens_in_batch = 0
              batch_buffer.append(tensor_example)
              num_tokens_in_batch += num_tokens
          else:
            # Fixed batch size: enqueue one sentence at a time.
            for tensor_example in tensor_examples:
              feed_dict = dict(zip(self.queue_input_tensors, [tensor_example[tn] for tn in tensor_names]))
              session.run(self.enqueue_op, feed_dict=feed_dict)
        # Clear out the batch buffer after each epoch to avoid the situation where the first document
        # in the next batch is the same one as the last document in the previous batch.
        if len(batch_buffer) > 0:
          batched_tensor_examples = [pad_batch_tensors(batch_buffer, tn) for tn in tensor_names]
          feed_dict = dict(zip(self.queue_input_tensors, batched_tensor_examples))
          session.run(self.enqueue_op, feed_dict=feed_dict)
    enqueue_thread = threading.Thread(target=_enqueue_loop)
    enqueue_thread.daemon = True
    enqueue_thread.start()

  def split_document_example(self, example):
    """Split document-based samples into sentence-based samples.

    Each per-sentence dict carries the doc key, sentence index, the
    sentence's SRL tuples, and its word offset within the document.
    """
    sentences = example["sentences"]
    split_examples = []
    word_offset = 0
    for i, sentence in enumerate(sentences):
      text_len = len(sentence)
      sent_example = {
          "sentence": sentence,
          "doc_key": example["doc_key"],
          "sent_id": i,
          "srl": example["srl"][i] if "srl" in example else [],
          "word_offset": word_offset,
          "sent_offset": example["sent_offset"]  # Sentence offset for the same doc ID.
      }
      word_offset += text_len
      split_examples.append(sent_example)
    return split_examples

  def tensorize_example(self, example, is_training):
    """Tensorize examples and caching embeddings.

    Converts one sentence-level example into a dict of arrays/scalars in
    input_names + label_names order. Span/predicate indices are shifted by
    word_offset so they become sentence-relative.
    """
    sentence = example["sentence"]
    doc_key = example["doc_key"]
    sent_id = example["sent_id"]  # Number of sentence in the document.
    word_offset = example["word_offset"]
    text_len = len(sentence)
    lm_doc_key = None
    lm_sent_key = None
    # For historical reasons, cached LM files key sentences differently
    # depending on the corpus the cache was built from.
    if self.lm_file and "ontonotes" in self.config["lm_path"]:
      idx = doc_key.rfind("_")
      lm_doc_key = doc_key[:idx] + "/" + str(example["sent_offset"] + sent_id)
    elif self.lm_file and "conll05" in self.config["lm_path"]:
      lm_doc_key = doc_key[1:]  # "S1234" -> "1234"
    else:
      lm_doc_key = doc_key
      lm_sent_key = str(sent_id)
    # Load cached LM.
    lm_emb = load_lm_embeddings_for_sentence(
        self.lm_file, self.lm_layers, self.lm_size, lm_doc_key, lm_sent_key)
    # Char matrix must be at least as wide as the widest filter width.
    max_word_length = max(max(len(w) for w in sentence), max(self.config["filter_widths"]))
    context_word_emb = np.zeros([text_len, self.context_embeddings.size])
    head_word_emb = np.zeros([text_len, self.head_embeddings.size])
    char_index = np.zeros([text_len, max_word_length])
    for j, word in enumerate(sentence):
      context_word_emb[j] = self.context_embeddings[word]
      head_word_emb[j] = self.head_embeddings[word]
      char_index[j, :len(word)] = [self.char_dict[c] for c in word]
    predicates, arg_starts, arg_ends, arg_labels = (
        tensorize_srl_relations(example["srl"], self.srl_labels,
                                filter_v_args=self.config["filter_v_args"]))
    # For gold predicate experiment.
    gold_predicates = get_all_predicates(example["srl"]) - word_offset
    example_tensor = {
        # Inputs.
        "tokens": sentence,
        "context_word_emb": context_word_emb,
        "head_word_emb": head_word_emb,
        "lm_emb": lm_emb,
        "char_idx": char_index,
        "text_len": text_len,
        "doc_id": example["doc_id"],
        "is_training": is_training,
        "gold_predicates": gold_predicates,
        "num_gold_predicates": len(gold_predicates),
        # Labels.
        "predicates": predicates - word_offset,
        "arg_starts": arg_starts - word_offset,
        "arg_ends": arg_ends - word_offset,
        "arg_labels": arg_labels,
        "srl_len": len(predicates),
    }
    return example_tensor

  def load_eval_data(self):
    """Load and tensorize the eval set.

    Returns:
      eval_data: flat list of per-sentence eval samples (from srl_eval_utils).
      eval_tensors: one list of tensorized sentences per document.
      coref_eval_data: the raw JSON examples, one per document.
    """
    eval_data = []
    eval_tensors = []
    coref_eval_data = []
    with open(self.config["eval_path"]) as f:
      eval_examples = [json.loads(jsonline) for jsonline in f.readlines()]
    populate_sentence_offset(eval_examples)
    for doc_id, example in enumerate(eval_examples):
      doc_tensors = []
      num_mentions_in_doc = 0
      for e in self.split_document_example(example):
        # Because each batch=1 document at test time, we do not need to offset cluster ids.
        e["cluster_id_offset"] = 0
        e["doc_id"] = doc_id + 1
        doc_tensors.append(self.tensorize_example(e, is_training=False))
        #num_mentions_in_doc += len(e["coref"])
      #assert num_mentions_in_doc == len(util.flatten(example["clusters"]))
      eval_tensors.append(doc_tensors)
      eval_data.extend(srl_eval_utils.split_example_for_eval(example))
      coref_eval_data.append(example)
    print("Loaded {} eval examples.".format(len(eval_data)))
    return eval_data, eval_tensors, coref_eval_data
|
taskManager.py | # BSD 2-Clause License
#
# Copyright (c) 2021-2022, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import time
from subprocess import PIPE
from threading import RLock, Thread
import psutil
from ...error import LauncherError
from ...log import get_logger
from ..utils.helpers import check_dev_log_level
from .util.shell import execute_async_cmd, execute_cmd
logger = get_logger(__name__)  # module-level logger
verbose_tm = check_dev_log_level()  # extra TaskManager debug logging at dev log level
TM_INTERVAL = 1  # seconds between monitoring polls
class TaskManager:
    """The Task Manager watches the subprocesses launched through
    the asyncronous shell interface. Each task is a wrapper
    around the Popen/Process instance.

    The Task Manager polls processes on TM_INTERVAL
    and detects job failure and completion. Upon termination, the
    task returncode, output, and error are added to the task history.

    When a launcher uses the task manager to start a task, the task
    is either managed (by a WLM) or unmanaged (meaning not managed by
    a WLM). In the latter case, the Task manager is responsible for the
    lifecycle of the process.
    """

    def __init__(self):
        """Initialize a task manager thread."""
        self.actively_monitoring = False  # True while the monitor loop runs
        self.task_history = dict()  # task_id -> (returncode, output, error)
        self.tasks = []  # Task instances currently being monitored
        self._lock = RLock()  # guards self.tasks and self.task_history

    def start(self):
        """Start the task manager thread

        The TaskManager is run as a daemon thread meaning
        that it will die when the main thread dies.
        """
        monitor = Thread(name="TaskManager", daemon=True, target=self.run)
        monitor.start()

    def run(self):
        """Start monitoring Tasks"""
        if verbose_tm:
            logger.debug("Starting Task Manager")
        self.actively_monitoring = True
        while self.actively_monitoring:
            time.sleep(TM_INTERVAL)
            # BUGFIX: iterate over a snapshot. remove_task() mutates
            # self.tasks, and removing an element from a list while
            # iterating it skips the following element, delaying that
            # task's completion handling by a full interval.
            for task in self.tasks[:]:
                returncode = task.check_status()  # poll and set returncode
                # must compare against None explicitly: returncode can be 0
                if returncode is not None:
                    output, error = task.get_io()
                    self.add_task_history(task.pid, returncode, output, error)
                    self.remove_task(task.pid)
            if len(self) == 0:
                self.actively_monitoring = False
                if verbose_tm:
                    logger.debug("Sleeping, no tasks to monitor")

    def start_task(self, cmd_list, cwd, env=None, out=PIPE, err=PIPE):
        """Start a task managed by the TaskManager

        This is an "unmanaged" task, meaning it is NOT managed
        by a workload manager

        :param cmd_list: command to run
        :type cmd_list: list[str]
        :param cwd: current working directory
        :type cwd: str
        :param env: environment to launch with
        :type env: dict[str, str], optional
        :param out: output file, defaults to PIPE
        :type out: file, optional
        :param err: error file, defaults to PIPE
        :type err: file, optional
        :return: task id
        :rtype: int
        """
        self._lock.acquire()
        try:
            proc = execute_async_cmd(cmd_list, cwd, env=env, out=out, err=err)
            task = Task(proc)
            if verbose_tm:
                logger.debug(f"Starting Task {task.pid}")
            self.tasks.append(task)
            self.task_history[task.pid] = (None, None, None)
            return task.pid
        finally:
            self._lock.release()

    def start_and_wait(self, cmd_list, cwd, env=None, timeout=None):
        """Start a task not managed by the TaskManager

        This method is used by launchers to launch managed tasks
        meaning that they ARE managed by a WLM.
        This is primarily used for batch job launches

        :param cmd_list: command to run
        :type cmd_list: list[str]
        :param cwd: current working directory
        :type cwd: str
        :param env: environment to launch with
        :type env: dict[str, str], optional
        :param timeout: time to wait, defaults to None
        :type timeout: int, optional
        :return: returncode, output, and err
        :rtype: int, str, str
        """
        returncode, out, err = execute_cmd(cmd_list, cwd=cwd, env=env, timeout=timeout)
        if verbose_tm:
            logger.debug("Ran and waited on task")
        return returncode, out, err

    def add_existing(self, task_id):
        """Add existing task to be managed by the TaskManager

        :param task_id: task id of existing task
        :type task_id: int
        :raises LauncherError: If task cannot be found
        """
        self._lock.acquire()
        try:
            process = psutil.Process(pid=task_id)
            task = Task(process)
            self.tasks.append(task)
            self.task_history[task.pid] = (None, None, None)
        except (psutil.NoSuchProcess, psutil.AccessDenied):
            raise LauncherError(f"Process provided {task_id} does not exist") from None
        finally:
            self._lock.release()

    def remove_task(self, task_id):
        """Remove a task from the TaskManager

        Kills the task first if it is still alive, then records its
        final returncode/output/error in the history.

        :param task_id: id of the task to remove
        :type task_id: str
        """
        self._lock.acquire()
        if verbose_tm:
            logger.debug(f"Removing Task {task_id}")
        try:
            task = self[task_id]
            if task.is_alive:
                task.kill()
                returncode = task.check_status()
                out, err = task.get_io()
                self.add_task_history(task_id, returncode, out, err)
            self.tasks.remove(task)
        except psutil.NoSuchProcess:
            logger.debug("Failed to kill a task during removal")
        except KeyError:
            logger.debug("Failed to remove a task, task was already removed")
        finally:
            self._lock.release()

    def get_task_update(self, task_id):
        """Get the update of a task

        :param task_id: task id
        :type task_id: str
        :return: status, returncode, output, error
        :rtype: str, int, str, str
        """
        self._lock.acquire()
        try:
            rc, out, err = self.task_history[task_id]
            # must be an identity check: rc can legitimately be 0
            if rc is None:
                try:
                    task = self[task_id]
                    return task.status, rc, out, err
                # removed forcefully either by OS or us, no returncode set
                # either way, job has completed and we won't have returncode
                # Usually hits when jobs last less then the TM_INTERVAL
                except (KeyError, psutil.NoSuchProcess):
                    return "Completed", rc, out, err
            # process has completed, status set manually as we don't
            # save task statuses during runtime.
            if rc != 0:
                return "Failed", rc, out, err
            return "Completed", rc, out, err
        finally:
            self._lock.release()

    def add_task_history(self, task_id, returncode, out=None, err=None):
        """Add a task to the task history

        Add a task to record its future returncode, output and error

        :param task_id: id of the task
        :type task_id: str
        :param returncode: returncode
        :type returncode: int
        :param out: output, defaults to None
        :type out: str, optional
        :param err: output, defaults to None
        :type err: str, optional
        """
        self.task_history[task_id] = (returncode, out, err)

    def __getitem__(self, task_id):
        self._lock.acquire()
        try:
            for task in self.tasks:
                if task.pid == task_id:
                    return task
            raise KeyError
        finally:
            self._lock.release()

    def __len__(self):
        self._lock.acquire()
        try:
            return len(self.tasks)
        finally:
            self._lock.release()
class Task:
    """Thin wrapper around a process handle (psutil.Popen or psutil.Process)
    that normalizes status polling, IO collection, and termination."""

    def __init__(self, process):
        """Initialize a task

        :param process: Popen object
        :type process: psutil.Popen
        """
        self.process = process
        self.pid = str(self.process.pid)

    def check_status(self):
        """Ping the job and return the returncode if finished

        :return: returncode if finished otherwise None
        :rtype: int
        """
        if not self.owned:
            # we can't manage Processes we don't own;
            # have to rely on .kill() to stop.
            return self.returncode
        return self.process.poll()

    def get_io(self):
        """Get the IO from the subprocess

        :return: output and error from the Popen
        :rtype: str, str
        """
        if not self.owned:
            # Process class does not implement communicate
            return None, None
        raw_out, raw_err = self.process.communicate()
        decoded = tuple(s.decode("utf-8") if s else s for s in (raw_out, raw_err))
        return decoded[0], decoded[1]

    def kill(self, timeout=10):
        """Kill the subprocess and all children"""
        def kill_callback(proc):
            logger.debug(f"Process terminated with kill {proc.pid}")

        victims = self.process.children(recursive=True)
        victims.append(self.process)  # the parent process dies too
        for victim in victims:
            victim.kill()
        _, survivors = psutil.wait_procs(victims, timeout=timeout, callback=kill_callback)
        for proc in survivors or []:
            logger.warning(f"Unable to kill emitted process {proc.pid}")

    def terminate(self, timeout=10):
        """Terminate a this process and all children.

        :param timeout: time to wait for task death, defaults to 10
        :type timeout: int, optional
        """
        def terminate_callback(proc):
            logger.debug(f"Cleanly terminated task {proc.pid}")

        victims = self.process.children(recursive=True)
        victims.append(self.process)  # the parent process dies too
        # try SIGTERM first for clean exit
        for victim in victims:
            if verbose_tm:
                logger.debug(victim)
            victim.terminate()
        # wait for termination; escalate to SIGKILL if anything survived
        _, survivors = psutil.wait_procs(
            victims, timeout=timeout, callback=terminate_callback
        )
        if survivors:
            logger.debug(f"SIGTERM failed, using SIGKILL")
            self.process.kill()

    def wait(self):
        """Block until the process exits."""
        self.process.wait()

    @property
    def returncode(self):
        """Exit code of an owned process; for unowned processes, None while
        alive and 0 once gone (no real code is available)."""
        if self.owned:
            return self.process.returncode
        return None if self.is_alive else 0

    @property
    def is_alive(self):
        """Whether the underlying process is still running."""
        return self.process.is_running()

    @property
    def status(self):
        """Raw process status string reported by psutil."""
        return self.process.status()

    @property
    def owned(self):
        """True when we spawned the process ourselves (psutil.Popen)."""
        return isinstance(self.process, psutil.Popen)
|
utils.py | #!/usr/bin/env/python
import numpy as np
import tensorflow as tf
import queue
import threading
import pickle
from rdkit.Chem import AllChem
from rdkit.Chem import Draw
from rdkit import Chem
from rdkit.Chem import rdmolops
from collections import defaultdict, deque
import os
import heapq
import planarity
import sascorer
from rdkit.Chem import Crippen
from rdkit.Chem import QED
SMALL_NUMBER = 1e-7  # epsilon for numerical stability
LARGE_NUMBER= 1e10  # effectively "infinity" for masking
geometry_numbers=[3, 4, 5, 6]  # ring sizes: triangle, square, pentagon, hexagon
# bond mapping: RDKit bond-type name -> integer edge-type id (and back)
bond_dict = {'SINGLE': 0, 'DOUBLE': 1, 'TRIPLE': 2, "AROMATIC": 3}
number_to_bond= {0: Chem.rdchem.BondType.SINGLE, 1:Chem.rdchem.BondType.DOUBLE,
                 2: Chem.rdchem.BondType.TRIPLE, 3:Chem.rdchem.BondType.AROMATIC}
def dataset_info(dataset):  # qm9, zinc, cep
    """Return static metadata for a dataset: atom types, per-type maximum
    valence, type-id -> element symbol, and graph-size bucket boundaries.

    Exits the process with status 1 when the dataset name is unknown.
    """
    all_info = {
        'qm9': {
            'atom_types': ["H", "C", "N", "O", "F"],
            'maximum_valence': {0: 1, 1: 4, 2: 3, 3: 2, 4: 1},
            'number_to_atom': {0: "H", 1: "C", 2: "N", 3: "O", 4: "F"},
            'bucket_sizes': np.array(list(range(4, 28, 2)) + [29]),
        },
        'zinc': {
            'atom_types': ['Br1(0)', 'C4(0)', 'Cl1(0)', 'F1(0)', 'H1(0)', 'I1(0)',
                           'N2(-1)', 'N3(0)', 'N4(1)', 'O1(-1)', 'O2(0)', 'S2(0)', 'S4(0)', 'S6(0)'],
            'maximum_valence': {0: 1, 1: 4, 2: 1, 3: 1, 4: 1, 5: 1, 6: 2, 7: 3, 8: 4, 9: 1, 10: 2, 11: 2, 12: 4, 13: 6, 14: 3},
            'number_to_atom': {0: 'Br', 1: 'C', 2: 'Cl', 3: 'F', 4: 'H', 5: 'I', 6: 'N', 7: 'N', 8: 'N', 9: 'O', 10: 'O', 11: 'S', 12: 'S', 13: 'S'},
            'bucket_sizes': np.array([28, 31, 33, 35, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 53, 55, 58, 84]),
        },
        'cep': {
            'atom_types': ["C", "S", "N", "O", "Se", "Si"],
            'maximum_valence': {0: 4, 1: 2, 2: 3, 3: 2, 4: 2, 5: 4},
            'number_to_atom': {0: "C", 1: "S", 2: "N", 3: "O", 4: "Se", 5: "Si"},
            'bucket_sizes': np.array([25, 28, 29, 30, 32, 33, 34, 35, 36, 37, 38, 39, 43, 46]),
        },
    }
    if dataset not in all_info:
        print("the datasets in use are qm9|zinc|cep")
        exit(1)
    return all_info[dataset]
# add one edge to adj matrix
def add_edge_mat(amat, src, dest, e, considering_edge_type=True):
    """Symmetrically mark the edge (src, dest) in adjacency matrix `amat`.

    With considering_edge_type, amat is indexed [edge_type, i, j] and the
    entry is set in the e-th slice; otherwise amat is a plain [i, j] matrix.
    """
    if not considering_edge_type:
        amat[src, dest] = 1
        amat[dest, src] = 1
        return
    amat[e, dest, src] = 1
    amat[e, src, dest] = 1
def graph_to_adj_mat(graph, max_n_vertices, num_edge_types, tie_fwd_bkwd=True, considering_edge_type=True):
    """Build a dense adjacency matrix from (src, edge_type, dest) triples.

    Returns a [num_edge_types, V, V] tensor when considering_edge_type is
    True, otherwise a [V, V] matrix. (tie_fwd_bkwd is unused; kept for
    interface compatibility.)
    """
    if considering_edge_type:
        amat = np.zeros((num_edge_types, max_n_vertices, max_n_vertices))
    else:
        amat = np.zeros((max_n_vertices, max_n_vertices))
    for src, e, dest in graph:
        add_edge_mat(amat, src, dest, e, considering_edge_type=considering_edge_type)
    return amat
def check_edge_prob(dataset):
    """Print predicted vs. gold edge probabilities from the saved
    intermediate-results pickle for `dataset` (debug helper)."""
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        # Pickle layout is an 11-tuple; only edge_prob / edge_prob_label are used here.
        (_adj, _etp, _etl, _nsp, _ns, edge_prob, edge_prob_label,
         _qp, _ql, _mean, _logvar) = pickle.load(f)
    for prediction, label in zip(edge_prob, edge_prob_label):
        print("prediction")
        print(prediction)
        print("label")
        print(label)
# check whether a graph is planar or not
def is_planar(location, adj_list, is_dense=False):
    """Test whether the graph plus the candidate edge `location` is planar.

    adj_list maps node -> [(neighbor, edge_type), ...]; with is_dense it is
    a 0/1 matrix that is first converted to that sparse form. The candidate
    edge is appended in both directions before the planarity test.
    """
    if is_dense:
        dense = adj_list
        adj_list = defaultdict(list)
        n = len(dense)
        for x in range(n):
            for y in range(n):
                if dense[x][y] == 1:
                    adj_list[x].append((y, 1))
    edges = []
    seen = set()
    for src, neighbors in adj_list.items():
        for dst, _etype in neighbors:
            if (dst, src) not in seen:
                edges.append((src, dst))
                seen.add((src, dst))
    edges += [location, (location[1], location[0])]
    return planarity.is_planar(edges)
def check_edge_type_prob(filter=None, dataset=None):
    """Print predicted vs. gold edge-type probabilities from the saved
    intermediate-results pickle (debug helper).

    :param filter: unused; kept for interface compatibility
    :param dataset: dataset tag used in the pickle filename. BUGFIX: the
        original read a global ``dataset`` that is never defined in this
        module (NameError); it is now an explicit parameter.
    """
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, \
            edge_prob, edge_prob_label, qed_prediction, qed_labels, mean, logvariance = pickle.load(f)
    for ep, epl in zip(edge_type_prob, edge_type_label):
        print("prediction")
        print(ep)
        print("label")
        print(epl)
def check_mean(dataset, filter=None):
    """Print the first 40 latent means from the saved intermediate-results
    pickle for `dataset` (debug helper). `filter` is unused."""
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        (_adj, _etp, _etl, _nsp, _ns, _ep, _epl,
         _qp, _ql, mean, _logvar) = pickle.load(f)
    print(mean.tolist()[:40])
def check_variance(dataset, filter=None):
    """Print the first 40 latent variances (exp of the stored log-variance)
    from the saved intermediate-results pickle. `filter` is unused."""
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        (_adj, _etp, _etl, _nsp, _ns, _ep, _epl,
         _qp, _ql, _mean, logvariance) = pickle.load(f)
    print(np.exp(logvariance).tolist()[:40])
def check_node_prob(filter=None, dataset=None):
    """Print node-symbol predictions vs. labels from the saved
    intermediate-results pickle (debug helper).

    :param filter: unused; kept for interface compatibility
    :param dataset: dataset tag used in the pickle filename. BUGFIX: the
        original read a global ``dataset`` that is never defined in this
        module (NameError); it is now an explicit parameter.
    """
    print(dataset)
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, \
            edge_prob, edge_prob_label, qed_prediction, qed_labels, mean, logvariance = pickle.load(f)
    print(node_symbol_prob[0])
    print(node_symbol[0])
    print(node_symbol_prob.shape)
def check_qed(filter=None, dataset=None):
    """Print QED predictions vs. labels and their mean absolute error from
    the saved intermediate-results pickle (debug helper).

    :param filter: unused; kept for interface compatibility
    :param dataset: dataset tag used in the pickle filename. BUGFIX: the
        original read a global ``dataset`` that is never defined in this
        module (NameError); it is now an explicit parameter.
    """
    with open('intermediate_results_%s' % dataset, 'rb') as f:
        adjacency_matrix, edge_type_prob, edge_type_label, node_symbol_prob, node_symbol, \
            edge_prob, edge_prob_label, qed_prediction, qed_labels, mean, logvariance = pickle.load(f)
    print(qed_prediction)
    print(qed_labels[0])
    print(np.mean(np.abs(qed_prediction - qed_labels[0])))
def onehot(idx, len):
    """Return a list of `len` zeros with a single 1 at position `idx`.

    (The second parameter shadows the builtin ``len``; the name is kept
    for interface compatibility with existing callers.)
    """
    vec = [0] * len
    vec[idx] = 1
    return vec
def generate_empty_adj_matrix(maximum_vertice_num):
    """All-zero adjacency tensor of shape [1, 3, V, V] (batch of one,
    three edge types)."""
    shape = (1, 3, maximum_vertice_num, maximum_vertice_num)
    return np.zeros(shape)
# standard normal with shape [a1, a2, a3]
def generate_std_normal(a1, a2, a3):
    """Draw a [a1, a2, a3] sample from N(0, 1) using the global numpy RNG."""
    return np.random.normal(loc=0.0, scale=1.0, size=(a1, a2, a3))
def check_validity(dataset):
    """Count how many of the generated SMILES strings RDKit can parse.

    :return: (number of unique generated SMILES, number of valid ones)
    """
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        smiles_set = set(pickle.load(f))
    valid = sum(1 for s in smiles_set if Chem.MolFromSmiles(s) is not None)
    return len(smiles_set), valid
# Get length for each graph based on node masks
def get_graph_length(all_node_mask):
    """Length of each graph: index of the first 0 in its node mask, or the
    full mask length when no node is masked out."""
    return [np.argmin(mask) if 0 in mask else len(mask)
            for mask in all_node_mask]
def make_dir(path):
    """Create directory `path` if it does not already exist.

    BUGFIX: uses EAFP instead of an exists()-then-mkdir check, which was
    racy — another process could create the directory between the check
    and the mkdir and crash this one. Parent directories are still
    required to exist (os.mkdir semantics unchanged).
    """
    try:
        os.mkdir(path)
        print('made directory %s' % path)
    except FileExistsError:
        pass  # already there — same outcome as the old exists() check
# sample node symbols based on node predictions
def sample_node_symbol(all_node_symbol_prob, all_lengths, dataset):
    """Sample one atom-type id per node from per-node probability vectors.

    Only the first all_lengths[i] nodes of graph i are sampled.
    """
    sampled = []
    for graph_idx, graph_prob in enumerate(all_node_symbol_prob):
        symbols = []
        for node_idx in range(all_lengths[graph_idx]):
            n_types = len(dataset_info(dataset)['atom_types'])
            symbols.append(np.random.choice(np.arange(n_types), p=graph_prob[node_idx]))
        sampled.append(symbols)
    return sampled
def dump(file_name, content):
    """Pickle `content` to `file_name` using the highest protocol."""
    with open(file_name, 'wb') as sink:
        pickle.dump(content, sink, pickle.HIGHEST_PROTOCOL)
def load(file_name):
    """Unpickle and return the object stored in `file_name`."""
    with open(file_name, 'rb') as source:
        return pickle.load(source)
# generate a new feature on whether adding the edges will generate more than two overlapped edges for rings
def get_overlapped_edge_feature(edge_mask, color, new_mol):
    """Flag candidate edges whose addition would create two rings sharing
    more than two edges.

    :param edge_mask: iterable of (node_in_focus, neighbor) candidate edges
    :param color: per-node marker; only neighbors with color == 1 are tried
    :param new_mol: RDKit RWMol; temporarily mutated (each bond is added,
        tested, then removed again)
    :return: list of (node_in_focus, neighbor) pairs that would overlap
    """
    overlapped_edge_feature = []
    for node_in_focus, neighbor in edge_mask:
        if color[neighbor] == 1:
            # attempt to add the edge
            new_mol.AddBond(int(node_in_focus), int(neighbor), number_to_bond[0])
            # Check whether there are two cycles having more than two overlap edges
            try:
                ssr = Chem.GetSymmSSSR(new_mol)
            except:
                # ring perception can fail on the temporary molecule; treat as no rings
                ssr = []
            overlap_flag = False
            for idx1 in range(len(ssr)):
                for idx2 in range(idx1 + 1, len(ssr)):
                    # >2 shared atoms between two rings means overlapping edges
                    if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
                        overlap_flag = True
            # remove that edge (restore the molecule to its original state)
            new_mol.RemoveBond(int(node_in_focus), int(neighbor))
            if overlap_flag:
                overlapped_edge_feature.append((node_in_focus, neighbor))
    return overlapped_edge_feature
# adj_list [3, v, v] or defaultdict. bfs distance on a graph
def bfs_distance(start, adj_list, is_dense=False):
    """Breadth-first distances from `start` over a sparse adjacency list.

    adj_list maps node -> [(neighbor, edge_type), ...]. Returns
    [(start, node, distance), ...] for every reachable node except start,
    in BFS discovery order. (`is_dense` is unused; kept for compatibility.)
    """
    dist = {}
    seen = {start}
    frontier = deque([(start, 0)])
    while frontier:
        node, d = frontier.popleft()
        for nbr, _edge_type in adj_list[node]:
            if nbr not in seen:
                seen.add(nbr)
                dist[nbr] = d + 1
                frontier.append((nbr, d + 1))
    return [(start, node, d) for node, d in dist.items()]
def get_initial_valence(node_symbol, dataset):
    """Maximum valence for each atom-type id in `node_symbol`."""
    valences = []
    for symbol_id in node_symbol:
        valences.append(dataset_info(dataset)['maximum_valence'][symbol_id])
    return valences
def add_atoms(new_mol, node_symbol, dataset):
    """Append one RDKit atom to `new_mol` for every atom-type id in
    `node_symbol`, using the dataset's id -> element mapping."""
    for type_id in node_symbol:
        if dataset == 'qm9' or dataset == 'cep':
            new_mol.AddAtom(Chem.Atom(dataset_info(dataset)['number_to_atom'][type_id]))
        elif dataset == 'zinc':
            # zinc types encode "<symbol><valence>(<charge>)"; recover the formal charge
            atom = Chem.Atom(dataset_info(dataset)['number_to_atom'][type_id])
            charge = int(dataset_info(dataset)['atom_types'][type_id].split('(')[1].strip(')'))
            atom.SetFormalCharge(charge)
            new_mol.AddAtom(atom)
def visualize_mol(path, new_mol):
    """Compute 2D coordinates for `new_mol` and draw it to an image file at `path`."""
    AllChem.Compute2DCoords(new_mol)
    print(path)
    Draw.MolToFile(new_mol, path)
def get_idx_of_largest_frag(frags):
    """Index of the fragment with the most atoms (first one on ties)."""
    sizes = [len(frag) for frag in frags]
    return np.argmax(sizes)
def remove_extra_nodes(new_mol):
    """Delete atoms until `new_mol` is a single connected fragment.

    Repeatedly removes one atom from some non-largest fragment and then
    recomputes the fragments, leaving the largest fragment intact.
    Mutates `new_mol` in place.
    """
    frags = Chem.rdmolops.GetMolFrags(new_mol)
    while len(frags) > 1:
        # Get the idx of the frag with largest length
        largest_idx = get_idx_of_largest_frag(frags)
        for idx in range(len(frags)):
            if idx != largest_idx:
                # Remove one atom that is not in the largest frag
                new_mol.RemoveAtom(frags[idx][0])
                break
        # atom indices shift after removal, so fragments must be recomputed
        frags = Chem.rdmolops.GetMolFrags(new_mol)
def novelty_metric(dataset):
    """Fraction of generated SMILES that do not appear in the training set."""
    with open('all_smiles_%s.pkl' % dataset, 'rb') as f:
        training_smiles = set(pickle.load(f))
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        generated_smiles = set(pickle.load(f))
    novel = sum(1 for s in generated_smiles if s not in training_smiles)
    return float(novel) / len(generated_smiles)
def count_edge_type(dataset, generated=True):
    """Count bond-type occurrences over a SMILES pickle.

    :param dataset: dataset tag used in the pickle filename
    :param generated: read generated SMILES if True, else training SMILES
    :return: (number of molecules, global per-type counter,
              per-molecule count lists of length len(bond_dict))
    """
    if generated:
        filename = 'generated_smiles_%s' % dataset
    else:
        filename = 'all_smiles_%s.pkl' % dataset
    with open(filename, 'rb') as f:
        all_smiles = set(pickle.load(f))
    counter = defaultdict(int)
    edge_type_per_molecule = []
    for smiles in all_smiles:
        nodes, edges = to_graph(smiles, dataset)
        edge_type_this_molecule = [0] * len(bond_dict)
        for edge in edges:
            edge_type = edge[1]
            edge_type_this_molecule[edge_type] += 1
            counter[edge_type] += 1
        edge_type_per_molecule.append(edge_type_this_molecule)
    # NOTE: removed dead local "total_sum = 0" that was never used.
    return len(all_smiles), counter, edge_type_per_molecule
def need_kekulize(mol):
    """True if the molecule contains any bond mapped to type id >= 3
    (i.e. an aromatic bond per bond_dict)."""
    return any(bond_dict[str(bond.GetBondType())] >= 3
               for bond in mol.GetBonds())
def check_planar(dataset):
    """Count generated molecules whose molecular graph is non-planar.

    :param dataset: dataset tag used in the pickle filename
    :return: (number of generated molecules, number of non-planar ones)
    """
    with open("generated_smiles_%s" % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    total_non_planar = 0
    for smiles in all_smiles:
        try:
            nodes, edges = to_graph(smiles, dataset)
        # was a bare "except:": keep best-effort skipping of bad SMILES but
        # let SystemExit/KeyboardInterrupt propagate
        except Exception:
            continue
        edges = [(src, dst) for src, e, dst in edges]
        if edges == []:
            continue
        if not planarity.is_planar(edges):
            total_non_planar += 1
    return len(all_smiles), total_non_planar
def count_atoms(dataset):
    """Count atom-type occurrences over the generated SMILES pickle.

    :param dataset: dataset tag used in the pickle filename
    :return: (number of molecules, global per-type counter,
              per-molecule count lists, one slot per atom type)
    """
    with open("generated_smiles_%s" % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    counter = defaultdict(int)
    atom_count_per_molecule = []  # record the counts for each molecule
    for smiles in all_smiles:
        try:
            nodes, edges = to_graph(smiles, dataset)
        # was a bare "except:": keep best-effort skipping of bad SMILES but
        # let SystemExit/KeyboardInterrupt propagate
        except Exception:
            continue
        atom_count_this_molecule = [0] * len(dataset_info(dataset)['atom_types'])
        for node in nodes:
            atom_type = np.argmax(node)
            atom_count_this_molecule[atom_type] += 1
            counter[atom_type] += 1
        atom_count_per_molecule.append(atom_count_this_molecule)
    # NOTE: removed dead local "total_sum = 0" that was never used.
    return len(all_smiles), counter, atom_count_per_molecule
def to_graph(smiles, dataset):
    """Convert a SMILES string into (nodes, edges).

    nodes: one-hot atom-type vectors over dataset_info(dataset)['atom_types'].
    edges: (src_atom_idx, bond_type_id, dst_atom_idx) triples with ids from
    bond_dict. Returns ([], []) for unparsable SMILES or unknown atom types.
    """
    mol = Chem.MolFromSmiles(smiles)
    if mol is None:
        return [], []
    # Kekulize it: rewrite aromatic bonds as alternating single/double bonds.
    if need_kekulize(mol):
        rdmolops.Kekulize(mol)
        if mol is None:
            return None, None
    # remove stereo information, such as inward and outward edges
    Chem.RemoveStereochemistry(mol)
    edges = []
    nodes = []
    for bond in mol.GetBonds():
        edges.append((bond.GetBeginAtomIdx(), bond_dict[str(bond.GetBondType())], bond.GetEndAtomIdx()))
        # after kekulization no aromatic bonds (id 3) should remain
        assert bond_dict[str(bond.GetBondType())] != 3
    for atom in mol.GetAtoms():
        if dataset == 'qm9' or dataset == "cep":
            nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom.GetSymbol()), len(dataset_info(dataset)['atom_types'])))
        elif dataset == 'zinc':  # transform using "<atom_symbol><valence>(<charge>)" notation
            symbol = atom.GetSymbol()
            valence = atom.GetTotalValence()
            charge = atom.GetFormalCharge()
            atom_str = "%s%i(%i)" % (symbol, valence, charge)
            if atom_str not in dataset_info(dataset)['atom_types']:
                print('unrecognized atom type %s' % atom_str)
                return [], []
            nodes.append(onehot(dataset_info(dataset)['atom_types'].index(atom_str), len(dataset_info(dataset)['atom_types'])))
    return nodes, edges
def check_uniqueness(dataset):
    """Return the fraction of generated SMILES that are unique (deduplicated / total)."""
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        generated = pickle.load(f)
    total = len(generated)
    unique = len(set(generated))
    return unique / total
def shape_count(dataset, remove_print=False, all_smiles=None):
    """Count ring sizes (as listed in the global `geometry_numbers`) over molecules.

    :param dataset: dataset name used to locate 'generated_smiles_<dataset>' when
                    *all_smiles* is not provided.
    :param remove_print: unused; kept for interface compatibility.
    :param all_smiles: optional pre-loaded iterable of SMILES strings.
    :return: (number of molecules, global ring-size counts, per-molecule ring-size counts)
    """
    if all_smiles is None:  # fixed: was `all_smiles == None`
        with open('generated_smiles_%s' % dataset, 'rb') as f:
            all_smiles = set(pickle.load(f))
    geometry_counts = [0] * len(geometry_numbers)
    geometry_counts_per_molecule = []  # record the geometry counts for each molecule
    for smiles in all_smiles:
        nodes, edges = to_graph(smiles, dataset)
        if len(edges) <= 0:
            continue
        new_mol = Chem.MolFromSmiles(smiles)
        # Symmetrized smallest set of smallest rings.
        ssr = Chem.GetSymmSSSR(new_mol)
        counts_for_molecule = [0] * len(geometry_numbers)
        for idx in range(len(ssr)):
            ring_len = len(list(ssr[idx]))
            if ring_len in geometry_numbers:
                # Hoisted: compute the bucket index once per ring.
                bucket = geometry_numbers.index(ring_len)
                geometry_counts[bucket] += 1
                counts_for_molecule[bucket] += 1
        geometry_counts_per_molecule.append(counts_for_molecule)
    return len(all_smiles), geometry_counts, geometry_counts_per_molecule
def check_adjacent_sparse(adj_list, node, neighbor_in_doubt):
    """Return (True, edge_type) if *neighbor_in_doubt* is adjacent to *node*, else (False, None).

    *adj_list* maps each node to a list of (neighbor, edge_type) pairs.
    """
    matches = [etype for nbr, etype in adj_list[node] if nbr == neighbor_in_doubt]
    if matches:
        return True, matches[0]
    return False, None
def glorot_init(shape):
    """Sample float32 weights uniformly from the Glorot/Xavier interval for *shape*."""
    limit = np.sqrt(6.0 / (shape[-2] + shape[-1]))
    samples = np.random.uniform(-limit, limit, size=shape)
    return samples.astype(np.float32)
class ThreadedIterator:
    """An iterator object that computes its elements in a parallel thread to be ready to be consumed.

    Elements are buffered in a bounded queue; ``None`` is used as the end-of-stream
    sentinel, so the wrapped iterator must *not* yield None.
    """

    def __init__(self, original_iterator, max_queue_size: int = 2):
        self.__queue = queue.Queue(maxsize=max_queue_size)
        self.__thread = threading.Thread(target=lambda: self.worker(original_iterator))
        self.__thread.start()

    def worker(self, original_iterator):
        # Producer: push every element, then the None sentinel to signal exhaustion.
        for element in original_iterator:
            # Fixed typo in the assertion message ("much" -> "must").
            assert element is not None, 'By convention, iterator elements must not be None'
            self.__queue.put(element, block=True)
        self.__queue.put(None, block=True)

    def __iter__(self):
        # Consumer: yield until the sentinel, then reap the producer thread.
        next_element = self.__queue.get(block=True)
        while next_element is not None:
            yield next_element
            next_element = self.__queue.get(block=True)
        self.__thread.join()
class MLP(object):
    """Multilayer perceptron (TensorFlow v1 style) with ReLU hidden activations.

    The final layer's pre-activation (no ReLU) is returned by __call__.
    Dropout is applied to the weight matrices using *dropout_keep_prob*.
    """

    def __init__(self, in_size, out_size, hid_sizes, dropout_keep_prob):
        self.in_size = in_size
        self.out_size = out_size
        self.hid_sizes = hid_sizes
        self.dropout_keep_prob = dropout_keep_prob
        self.params = self.make_network_params()

    def make_network_params(self):
        """Create per-layer weight and bias variables."""
        layer_dims = [self.in_size] + self.hid_sizes + [self.out_size]
        layer_shapes = list(zip(layer_dims[:-1], layer_dims[1:]))
        weights = []
        biases = []
        for layer_idx, layer_shape in enumerate(layer_shapes):
            weights.append(tf.Variable(self.init_weights(layer_shape),
                                       name='MLP_W_layer%i' % layer_idx))
            biases.append(tf.Variable(np.zeros(layer_shape[-1]).astype(np.float32),
                                      name='MLP_b_layer%i' % layer_idx))
        return {"weights": weights, "biases": biases}

    def init_weights(self, shape):
        """Glorot-style uniform initialization in [-sqrt(6/(fan_in+fan_out)), +...]."""
        bound = np.sqrt(6.0 / (shape[-2] + shape[-1]))
        return bound * (2 * np.random.rand(*shape).astype(np.float32) - 1)

    def __call__(self, inputs):
        activations = inputs
        for weight, bias in zip(self.params["weights"], self.params["biases"]):
            pre_activation = tf.matmul(activations, tf.nn.dropout(weight, self.dropout_keep_prob)) + bias
            activations = tf.nn.relu(pre_activation)
        # Return the last layer's pre-ReLU output.
        return pre_activation
class Graph():
    """Undirected graph over an adjacency-list mapping; supports a tree check."""

    def __init__(self, V, g):
        self.V = V        # number of vertices (labelled 0..V-1)
        self.graph = g    # mapping: vertex -> list of adjacent vertices

    def addEdge(self, v, w):
        """Insert the undirected edge (v, w) into both adjacency lists."""
        self.graph[v].append(w)
        self.graph[w].append(v)

    def isCyclicUtil(self, v, visited, parent):
        """DFS helper: True when a cycle is reachable from *v*.

        The edge back to *parent* is ignored; any other already-visited
        neighbour is a back edge, i.e. a cycle.
        """
        visited[v] = True
        for neighbour in self.graph[v]:
            if not visited[neighbour]:
                if self.isCyclicUtil(neighbour, visited, v):
                    return True
            elif neighbour != parent:
                # Visited vertex that is not our DFS parent => cycle.
                return True
        return False

    def isTree(self):
        """Return True iff the graph is connected and acyclic."""
        visited = [False] * self.V
        # One DFS from vertex 0 both detects cycles and marks reachability.
        if self.isCyclicUtil(0, visited, -1):
            return False
        # Any unvisited vertex means the graph is disconnected.
        return all(visited)
# Check whether each generated graph is a tree (i.e. has no cycle) or not.
def check_cyclic(dataset, generated=True):
    """Count how many molecules in the dataset are trees (connected, acyclic graphs).

    :param dataset: dataset name used to locate the pickle of SMILES strings.
    :param generated: read 'generated_smiles_<dataset>' when True,
                      otherwise 'all_smiles_<dataset>.pkl'.
    :return: (number of unique SMILES, number of tree-shaped molecules)
    """
    source = "generated_smiles_%s" % dataset if generated else "all_smiles_%s.pkl" % dataset
    with open(source, 'rb') as f:
        all_smiles = set(pickle.load(f))
    tree_count = 0
    for smiles in all_smiles:
        nodes, edges = to_graph(smiles, dataset)
        # Drop the bond-type component; only connectivity matters here.
        pairs = [(src, dst) for src, e, dst in edges]
        if pairs == []:
            continue
        adjacency = defaultdict(list)
        for src, dst in pairs:
            adjacency[src].append(dst)
            adjacency[dst].append(src)
        if Graph(len(nodes), adjacency).isTree():
            tree_count += 1
    return len(all_smiles), tree_count
def check_sascorer(dataset):
    """Average synthetic-accessibility (SA) score over the generated molecules.

    :return: (mean SA score over scoreable molecules, list of per-molecule scores)
    """
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    sa_sum = 0
    total = 0
    sa_score_per_molecule = []
    for smiles in all_smiles:
        new_mol = Chem.MolFromSmiles(smiles)
        try:
            val = sascorer.calculateScore(new_mol)
        except Exception:  # narrowed from bare except: skip unscoreable molecules only
            continue
        sa_sum += val
        sa_score_per_molecule.append(val)
        total += 1
    return sa_sum / total, sa_score_per_molecule
def check_logp(dataset):
    """Average Crippen logP over the generated molecules.

    :return: (mean logP over scoreable molecules, list of per-molecule logP values)
    """
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    logp_sum = 0
    total = 0
    logp_score_per_molecule = []
    for smiles in all_smiles:
        new_mol = Chem.MolFromSmiles(smiles)
        try:
            val = Crippen.MolLogP(new_mol)
        except Exception:  # narrowed from bare except: skip unscoreable molecules only
            continue
        logp_sum += val
        logp_score_per_molecule.append(val)
        total += 1
    return logp_sum / total, logp_score_per_molecule
def check_qed(dataset):
    """Average QED (drug-likeness) score over the generated molecules.

    :return: (mean QED over scoreable molecules, list of per-molecule QED values)
    """
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    qed_sum = 0
    total = 0
    qed_score_per_molecule = []
    for smiles in all_smiles:
        new_mol = Chem.MolFromSmiles(smiles)
        try:
            val = QED.qed(new_mol)
        except Exception:  # narrowed from bare except: skip unscoreable molecules only
            continue
        qed_sum += val
        qed_score_per_molecule.append(val)
        total += 1
    return qed_sum / total, qed_score_per_molecule
def sssr_metric(dataset):
    """Fraction of generated molecules with two rings sharing more than two atoms.

    :return: overlapped_molecule_count / total_molecule_count
    """
    with open('generated_smiles_%s' % dataset, 'rb') as f:
        all_smiles = set(pickle.load(f))
    overlapped_molecule = 0
    for smiles in all_smiles:
        new_mol = Chem.MolFromSmiles(smiles)
        ssr = Chem.GetSymmSSSR(new_mol)
        overlap_flag = False
        for idx1 in range(len(ssr)):
            for idx2 in range(idx1 + 1, len(ssr)):
                # Rings sharing more than 2 atoms overlap (beyond a fused edge).
                if len(set(ssr[idx1]) & set(ssr[idx2])) > 2:
                    overlap_flag = True
                    break  # early exit: one overlapping pair is enough
            if overlap_flag:
                break
        if overlap_flag:
            overlapped_molecule += 1
    return overlapped_molecule / len(all_smiles)
# Pick the single best molecule, ranking first by shape score, then by probability.
def select_best(all_mol):
    """Return the molecule from *all_mol* with the highest shape score,
    using probability as the tie-breaker.

    *all_mol* is a list of (shape_score, probability, molecule) tuples.
    """
    ranked = sorted(all_mol)
    top_shape = ranked[-1][0]
    # Keep only candidates with the winning shape, then rank them by probability.
    candidates = sorted((prob, mol) for shape, prob, mol in ranked if shape == top_shape)
    return candidates[-1][1]
# Utility: convert the sparse incremental adjacency representation to dense tensors.
def incre_adj_mat_to_dense(incre_adj_mat, num_edge_types, maximum_vertice_num):
    """Densify a list of sparse incremental adjacency maps.

    Each element of *incre_adj_mat* maps node -> [(neighbor, edge_type), ...].
    Returns a list of arrays shaped [num_edge_types, maximum_vertice_num, maximum_vertice_num].
    """
    dense_mats = []
    for step in incre_adj_mat:
        dense = np.zeros((num_edge_types, maximum_vertice_num, maximum_vertice_num))
        for node, neighbours in step.items():
            for neighbour, edge_type in neighbours:
                dense[edge_type][node][neighbour] = 1
        dense_mats.append(dense)
    return dense_mats  # [number_iteration, num_edge_types, maximum_vertice_num, maximum_vertice_num]
def distance_to_others_dense(distance_to_others, maximum_vertice_num):
    """Densify sparse (x, y, d) distance triples into per-step vectors indexed by y."""
    dense_rows = []
    for sparse_row in distance_to_others:
        row = np.zeros((maximum_vertice_num), dtype=int)
        for _, target, dist in sparse_row:
            row[target] = dist
        dense_rows.append(row)
    return dense_rows  # [number_iteration, maximum_vertice_num]
def overlapped_edge_features_to_dense(overlapped_edge_features, maximum_vertice_num):
    """Densify sparse (node_in_focus, neighbor) pairs into 0/1 indicator vectors over neighbors."""
    dense_rows = []
    for sparse_row in overlapped_edge_features:
        row = np.zeros((maximum_vertice_num), dtype=int)
        for _, neighbour in sparse_row:
            row[neighbour] = 1
        dense_rows.append(row)
    return dense_rows  # [number_iteration, maximum_vertice_num]
def node_sequence_to_dense(node_sequence, maximum_vertice_num):
    """One-hot encode each node index in *node_sequence* over *maximum_vertice_num* slots."""
    return [[1 if pos == node else 0 for pos in range(maximum_vertice_num)]
            for node in node_sequence]  # [number_iteration, maximum_vertice_num]
def edge_type_masks_to_dense(edge_type_masks, maximum_vertice_num, num_edge_types):
    """Densify sparse (node_in_focus, neighbor, bond) triples into per-bond-type masks."""
    dense_masks = []
    for sparse_mask in edge_type_masks:
        mask = np.zeros([num_edge_types, maximum_vertice_num])
        for _, neighbour, bond in sparse_mask:
            mask[bond][neighbour] = 1
        dense_masks.append(mask)
    return dense_masks  # [number_iteration, num_edge_types, maximum_vertice_num]
def edge_type_labels_to_dense(edge_type_labels, maximum_vertice_num, num_edge_types):
    """Densify sparse label triples into per-bond-type probability targets.

    Each valid (neighbor, bond) slot receives uniform probability mass 1/len(labels).
    """
    dense_labels = []
    for sparse_labels in edge_type_labels:
        labels = np.zeros([num_edge_types, maximum_vertice_num])
        uniform_mass = 1 / float(len(sparse_labels))  # uniform over the valid slots
        for _, neighbour, bond in sparse_labels:
            labels[bond][neighbour] = uniform_mass
        dense_labels.append(labels)
    return dense_labels  # [number_iteration, num_edge_types, maximum_vertice_num]
def edge_masks_to_dense(edge_masks, maximum_vertice_num):
    """Densify sparse (node_in_focus, neighbor) pairs into 0/1 mask lists over neighbors."""
    dense_masks = []
    for sparse_mask in edge_masks:
        mask = [0] * maximum_vertice_num
        for _, neighbour in sparse_mask:
            mask[neighbour] = 1
        dense_masks.append(mask)
    return dense_masks  # [number_iteration, maximum_vertice_num]
def edge_labels_to_dense(edge_labels, maximum_vertice_num):
    """Densify sparse label pairs into per-neighbor probability targets.

    Each valid neighbor slot receives uniform probability mass 1/len(labels).
    """
    dense_labels = []
    for sparse_labels in edge_labels:
        labels = [0] * maximum_vertice_num
        uniform_mass = 1 / float(len(sparse_labels))
        for _, neighbour in sparse_labels:
            labels[neighbour] = uniform_mass
        dense_labels.append(labels)
    return dense_labels  # [number_iteration, maximum_vertice_num]
test_session.py | import os
import tempfile
import threading
import unittest
import web
class SessionTest(unittest.TestCase):
    # End-to-end session tests against a throwaway web.py auto_application.
    # Subclasses override make_session() to exercise different session stores.

    def setUp(self):
        app = web.auto_application()
        session = self.make_session(app)

        # Increments and returns the per-session counter.
        class count(app.page):
            def GET(self):
                session.count += 1
                return str(session.count)

        # Destroys the current session.
        class reset(app.page):
            def GET(self):
                session.kill()
                return ""

        # Writes a session value, then redirects; the value must survive.
        class redirect(app.page):
            def GET(self):
                session.request_token = "123"
                raise web.redirect("/count")

        # Exposes arbitrary session values by name.
        class get_session(app.page):
            path = "/session/(.*)"

            def GET(self, name):
                return session[name]

        self.app = app
        self.session = session

    def make_session(self, app):
        # Default store: disk-backed session store in a fresh temp directory.
        dir = tempfile.mkdtemp()
        store = web.session.DiskStore(dir)
        return web.session.Session(app, store, {"count": 0})

    def testSession(self):
        b = self.app.browser()
        self.assertEqual(b.open("/count").read(), b"1")
        self.assertEqual(b.open("/count").read(), b"2")
        self.assertEqual(b.open("/count").read(), b"3")
        b.open("/reset")
        # After kill(), the counter starts over from its initializer.
        self.assertEqual(b.open("/count").read(), b"1")

    def testParallelSessions(self):
        # Two browsers must get independent sessions (independent counters).
        b1 = self.app.browser()
        b2 = self.app.browser()
        b1.open("/count")
        for i in range(1, 10):
            self.assertEqual(b1.open("/count").read(), str(i + 1).encode("utf8"))
            self.assertEqual(b2.open("/count").read(), str(i).encode("utf8"))

    def testBadSessionId(self):
        # A tampered session-id cookie must not resolve to an existing session;
        # the server should hand out a fresh one (counter restarts at 1).
        b = self.app.browser()
        self.assertEqual(b.open("/count").read(), b"1")
        self.assertEqual(b.open("/count").read(), b"2")
        cookie = b.cookiejar._cookies["0.0.0.0"]["/"]["webpy_session_id"]
        cookie.value = "/etc/password"
        self.assertEqual(b.open("/count").read(), b"1")

    def testSlowCookies(self):
        # Same as above, but with a quoted cookie value as some clients send.
        b = self.app.browser()
        self.assertEqual(b.open("/count").read(), b"1")
        self.assertEqual(b.open("/count").read(), b"2")
        cookie = b.cookiejar._cookies["0.0.0.0"]["/"]["webpy_session_id"]
        cookie.value = '"/etc/password"'
        self.assertEqual(b.open("/count").read(), b"1")

    def testRedirect(self):
        # Session data written right before a redirect must persist through it.
        b = self.app.browser()
        b.open("/redirect")
        b.open("/session/request_token")
        self.assertEqual(b.data, b"123")
class DiskStoreTest(unittest.TestCase):
    """Tests for the disk-backed session store."""

    def testStoreConcurrent(self):
        """A value written from another thread must be readable afterwards."""
        dir = tempfile.mkdtemp()
        store = web.session.DiskStore(dir)

        def set_val():
            store["fail"] = "value"

        # Fixed: `value` was previously unbound when every read raised KeyError,
        # turning a test failure into a NameError; initialize it up front.
        value = None
        for _ in range(10):
            writer = threading.Thread(target=set_val)
            writer.start()
            # Fixed: join the writer so the read below is not a race
            # (the original never joined its threads, making the test flaky).
            writer.join()
            try:
                value = store["fail"]
            except KeyError:
                pass
        self.assertEqual(value, "value")
class DBSessionTest(SessionTest):
    """Session test with db store."""

    def make_session(self, app):
        # Recreate the sqlite database from scratch for every test run.
        if os.path.exists("webpy.db"):
            os.remove("webpy.db")
        db = web.database(dbn="sqlite", db="webpy.db")
        # db.printing = True
        db.query(
            ""
            + "CREATE TABLE session ("
            + " session_id char(128) unique not null,"
            + " atime timestamp default (datetime('now','utc')),"
            + " data text)"
        )
        store = web.session.DBStore(db, "session")
        return web.session.Session(app, store, {"count": 0})
class MemorySessionTest(SessionTest):
    """Session test with in-memory store."""

    def make_session(self, app):
        store = web.session.MemoryStore()
        return web.session.Session(app, store, {"count": 0})
|
controllers.py | """maintains all functionality related running virtual machines, starting and tracking tests."""
import datetime
import hashlib
import json
import os
import shutil
import sys
from multiprocessing import Process
from typing import Any
import requests
from flask import (Blueprint, abort, flash, g, jsonify, redirect, request,
url_for)
from git import GitCommandError, InvalidGitRepositoryError, Repo
from github import ApiError, GitHub
from lxml import etree
from lxml.etree import Element
from markdown2 import markdown
from pymysql.err import IntegrityError
from sqlalchemy import and_, func, or_
from sqlalchemy.sql import label
from sqlalchemy.sql.functions import count
from werkzeug.utils import secure_filename
from decorators import get_menu_entries, template_renderer
from mailer import Mailer
from mod_auth.controllers import check_access_rights, login_required
from mod_auth.models import Role
from mod_ci.forms import AddUsersToBlacklist, RemoveUsersFromBlacklist
from mod_ci.models import BlockedUsers, Kvm, MaintenanceMode
from mod_customized.models import CustomizedTest
from mod_deploy.controllers import is_valid_signature, request_from_github
from mod_home.models import CCExtractorVersion, GeneralData
from mod_regression.models import (Category, RegressionTest,
RegressionTestOutput,
regressionTestLinkTable)
from mod_sample.models import Issue
from mod_test.models import (Fork, Test, TestPlatform, TestProgress,
TestResult, TestResultFile, TestStatus, TestType)
# libvirt (KVM management) is only importable/used on Linux hosts.
if sys.platform.startswith("linux"):
    import libvirt
# Flask blueprint for all CI-related routes.
mod_ci = Blueprint('ci', __name__)
class Status:
    """Define different states for the tests."""

    # Values posted as `state` to the GitHub commit-status API
    # (see repository.statuses(...).post calls below).
    PENDING = "pending"
    SUCCESS = "success"
    ERROR = "error"
    FAILURE = "failure"
@mod_ci.before_app_request
def before_app_request() -> None:
    """Organize menu content such as Platform management before request."""
    # Build the 'Platform mgmt' entries visible to the current user (admin-only).
    config_entries = get_menu_entries(
        g.user, 'Platform mgmt', 'cog', [], '', [
            {'title': 'Maintenance', 'icon': 'wrench',
             'route': 'ci.show_maintenance', 'access': [Role.admin]},  # type: ignore
            {'title': 'Blocked Users', 'icon': 'ban',
             'route': 'ci.blocked_users', 'access': [Role.admin]}  # type: ignore
        ]
    )
    # Prepend to an existing 'config' menu section, or create the section.
    if 'config' in g.menu_entries and 'entries' in config_entries:
        g.menu_entries['config']['entries'] = config_entries['entries'] + g.menu_entries['config']['entries']
    else:
        g.menu_entries['config'] = config_entries
def start_platforms(db, repository, delay=None, platform=None) -> None:
    """
    Start new test on both platforms in parallel.

    We use multiprocessing module which bypasses Python GIL to make use of multiple cores of the processor.

    :param db: database connection
    :param repository: repository to run tests on
    :param delay: optional delay (seconds) passed through to each kvm_processor
    :param platform: run only this TestPlatform when given; both when None
    """
    from run import config, log, app
    with app.app_context():
        from flask import current_app
        # Deduplicated: the linux/windows branches only differed in the
        # config key, the TestPlatform value and the log label.
        targets = [
            (TestPlatform.linux, 'KVM_LINUX_NAME', 'Linux'),
            (TestPlatform.windows, 'KVM_WINDOWS_NAME', 'Windows'),
        ]
        for test_platform, config_key, label in targets:
            if platform is not None and platform != test_platform:
                continue
            kvm_name = config.get(config_key, '')
            log.info(f'Define process to run {label} VM')
            process = Process(target=kvm_processor, args=(current_app._get_current_object(), db, kvm_name,
                                                          test_platform, repository, delay,))
            process.start()
            log.info(f'{label} VM process kicked off')
def kvm_processor(app, db, kvm_name, platform, repository, delay) -> None:
    """
    Check whether there is no already running same kvm.

    Checks whether machine is in maintenance mode or not
    Launch kvm if not used by any other test
    Creates testing xml files to test the change in main repo.
    Creates clone with separate branch and merge pr into it.

    :param app: The Flask app
    :type app: Flask
    :param db: database connection
    :type db: sqlalchemy.orm.scoped_session
    :param kvm_name: name for the kvm
    :type kvm_name: str
    :param platform: operating system platform to test
    :type platform: TestPlatform
    :param repository: repository to run tests on
    :type repository: str
    :param delay: time delay after which to start kvm processor
    :type delay: int
    """
    from run import config, log, get_github_config

    github_config = get_github_config(config)
    log.info(f"[{platform}] Running kvm_processor")
    if kvm_name == "":
        log.critical(f'[{platform}] KVM name is empty!')
        return
    if delay is not None:
        import time
        log.debug(f'[{platform}] Sleeping for {delay} seconds')
        time.sleep(delay)
    # Bail out early when this platform is flagged for maintenance.
    maintenance_mode = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
    if maintenance_mode is not None and maintenance_mode.disabled:
        log.debug(f'[{platform}] In maintenance mode! Waiting...')
        return
    # Connect to the local libvirt daemon and look up the VM by name.
    conn = libvirt.open("qemu:///system")
    if conn is None:
        log.critical(f"[{platform}] Connection to libvirt failed!")
        return
    try:
        vm = conn.lookupByName(kvm_name)
    except libvirt.libvirtError:
        log.critical(f"[{platform}] No VM named {kvm_name} found!")
        return
    vm_info = vm.info()
    if vm_info[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        # Running, check expiry and compare to runtime
        status = Kvm.query.filter(Kvm.name == kvm_name).first()
        max_runtime = config.get("KVM_MAX_RUNTIME", 120)
        if status is not None:
            if datetime.datetime.now() - status.timestamp >= datetime.timedelta(minutes=max_runtime):
                # Test exceeded the maximum runtime: cancel it and kill the VM.
                test_progress = TestProgress(status.test.id, TestStatus.canceled, 'Runtime exceeded')
                db.add(test_progress)
                db.delete(status)
                db.commit()
                if vm.destroy() == -1:
                    log.critical(f"[{platform}] Failed to shut down {kvm_name}")
                    return
            else:
                log.info(f"[{platform}] Current job not expired yet.")
                return
        else:
            log.warn(f"[{platform}] No task, but VM is running! Hard reset necessary")
            if vm.destroy() == -1:
                log.critical(f"[{platform}] Failed to shut down {kvm_name}")
                return
    # Check if there's no KVM status left
    status = Kvm.query.filter(Kvm.name == kvm_name).first()
    if status is not None:
        log.warn(f"[{platform}] KVM is powered off, but test {status.test.id} still present, deleting entry")
        db.delete(status)
        db.commit()
    # Get oldest test for this platform
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_([TestStatus.canceled, TestStatus.completed])
    ).subquery()
    fork_location = f"%/{github_config['repository_owner']}/{github_config['repository']}.git"
    fork = Fork.query.filter(Fork.github.like(fork_location)).first()
    # Prefer tests belonging to the main repository's fork entry.
    test = Test.query.filter(
        Test.id.notin_(finished_tests), Test.platform == platform, Test.fork_id == fork.id
    ).order_by(Test.id.asc()).first()
    if test is None:
        # Fall back to any pending test for this platform.
        test = Test.query.filter(Test.id.notin_(finished_tests), Test.platform == platform).order_by(
            Test.id.asc()).first()
    if test is None:
        log.info(f'[{platform}] No more tests to run, returning')
        return
    if test.test_type == TestType.pull_request and test.pr_nr == 0:
        log.warn(f'[{platform}] Test {test.id} is invalid, deleting')
        db.delete(test)
        db.commit()
        return
    # Reset to snapshot
    if vm.hasCurrentSnapshot() != 1:
        log.critical(f"[{platform}] VM {kvm_name} has no current snapshot set!")
        return
    snapshot = vm.snapshotCurrent()
    if vm.revertToSnapshot(snapshot) == -1:
        log.critical(f"[{platform}] Failed to revert to {snapshot.getName()} for {kvm_name}")
        return
    log.info(f"[{platform}] Reverted to {snapshot.getName()} for {kvm_name}")
    log.debug(f'[{platform}] Starting test {test.id}')
    # NOTE(review): this Kvm row is only persisted later, after vm.create()
    # succeeds (see db.add(status) near the end).
    status = Kvm(kvm_name, test.id)
    # Prepare data
    # 0) Write url to file
    with app.app_context():
        full_url = url_for('ci.progress_reporter', test_id=test.id, token=test.token, _external=True, _scheme="https")
        file_path = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'reportURL')
        with open(file_path, 'w') as f:
            f.write(full_url)
    # 1) Generate test files
    base_folder = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'ci-tests')
    categories = Category.query.order_by(Category.id.desc()).all()
    commit_name = 'fetch_commit_' + platform.value
    commit_hash = GeneralData.query.filter(GeneralData.key == commit_name).first().value
    last_commit = Test.query.filter(and_(Test.commit == commit_hash, Test.platform == platform)).first()
    log.debug(f"[{platform}] We will compare against the results of test {last_commit.id}")
    regression_ids = test.get_customized_regressiontests()
    # Init collection file
    multi_test = etree.Element('multitest')
    for category in categories:
        # Skip categories without tests
        if len(category.regression_tests) == 0:
            continue
        # Create XML file for test
        file_name = f'{category.name}.xml'
        single_test = etree.Element('tests')
        should_write_xml = False
        for regression_test in category.regression_tests:
            if regression_test.id not in regression_ids:
                log.debug(f'Skipping RT #{regression_test.id} ({category.name}) as not in scope')
                continue
            should_write_xml = True
            entry = etree.SubElement(single_test, 'entry', id=str(regression_test.id))
            command = etree.SubElement(entry, 'command')
            command.text = regression_test.command
            input_node = etree.SubElement(entry, 'input', type=regression_test.input_type.value)
            # Need a path that is relative to the folder we provide inside the CI environment.
            input_node.text = regression_test.sample.filename
            output_node = etree.SubElement(entry, 'output')
            output_node.text = regression_test.output_type.value
            compare = etree.SubElement(entry, 'compare')
            # Files produced by the previous run of this regression test.
            last_files = TestResultFile.query.filter(and_(
                TestResultFile.test_id == last_commit.id,
                TestResultFile.regression_test_id == regression_test.id
            )).subquery()
            for output_file in regression_test.output_files:
                ignore_file = str(output_file.ignore).lower()
                file_node = etree.SubElement(compare, 'file', ignore=ignore_file, id=str(output_file.id))
                last_commit_files = db.query(last_files.c.got).filter(and_(
                    last_files.c.regression_test_output_id == output_file.id,
                    last_files.c.got.isnot(None)
                )).first()
                correct = etree.SubElement(file_node, 'correct')
                # Need a path that is relative to the folder we provide inside the CI environment.
                if last_commit_files is None:
                    log.debug(f"Selecting original file for RT #{regression_test.id} ({category.name})")
                    correct.text = output_file.filename_correct
                else:
                    correct.text = output_file.create_correct_filename(last_commit_files[0])
                expected = etree.SubElement(file_node, 'expected')
                expected.text = output_file.filename_expected(regression_test.sample.sha)
        if not should_write_xml:
            continue
        save_xml_to_file(single_test, base_folder, file_name)
        # Append to collection file
        test_file = etree.SubElement(multi_test, 'testfile')
        location = etree.SubElement(test_file, 'location')
        location.text = file_name
    save_xml_to_file(multi_test, base_folder, 'TestAll.xml')
    # 2) Create git repo clone and merge PR into it (if necessary)
    try:
        repo = Repo(os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data', kvm_name, 'unsafe-ccextractor'))
    except InvalidGitRepositoryError:
        log.critical(f"[{platform}] Could not open CCExtractor's repository copy!")
        return
    # Return to master
    repo.heads.master.checkout(True)
    # Update repository from upstream
    try:
        github_url = test.fork.github
        if is_main_repo(github_url):
            origin = repo.remote('origin')
        else:
            # Use (or create) a dedicated remote per fork.
            fork_id = test.fork.id
            remote = f'fork_{fork_id}'
            if remote in [remote.name for remote in repo.remotes]:
                origin = repo.remote(remote)
            else:
                origin = repo.create_remote(remote, url=github_url)
    except ValueError:
        log.critical(f"[{platform}] Origin remote doesn't exist!")
        return
    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        log.info(f'[{platform}] Fetch from remote returned no new data...')
    # Checkout to Remote Master
    repo.git.checkout(origin.refs.master)
    # Pull code (finally)
    pull_info = origin.pull('master')
    if len(pull_info) == 0:
        log.info(f"[{platform}] Pull from remote returned no new data...")
    elif pull_info[0].flags > 128:
        log.critical(f"[{platform}] Did not pull any information from remote: {pull_info[0].flags}!")
        return
    ci_branch = 'CI_Branch'
    # Delete the test branch if it exists, and recreate
    try:
        repo.delete_head(ci_branch, force=True)
    except GitCommandError:
        log.info(f"[{platform}] Could not delete CI_Branch head")
    # Remove possible left rebase-apply directory
    try:
        shutil.rmtree(os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'unsafe-ccextractor', '.git', 'rebase-apply'))
    except OSError:
        log.info(f"[{platform}] Could not delete rebase-apply")
    # If PR, merge, otherwise reset to commit
    if test.test_type == TestType.pull_request:
        # Fetch PR (stored under origin/pull/<id>/head)
        pull_info = origin.fetch(f'pull/{test.pr_nr}/head:{ci_branch}')
        if len(pull_info) == 0:
            log.warn(f"[{platform}] Did not pull any information from remote PR!")
        elif pull_info[0].flags > 128:
            log.critical(f"[{platform}] Did not pull any information from remote PR: {pull_info[0].flags}!")
            return
        try:
            test_branch = repo.heads[ci_branch]
        except IndexError:
            log.critical(f'{ci_branch} does not exist')
            return
        test_branch.checkout(True)
        try:
            pull = repository.pulls(f'{test.pr_nr}').get()
        except ApiError as a:
            log.error(f'Got an exception while fetching the PR payload! Message: {a.message}')
            return
        if pull['mergeable'] is False:
            # PR cannot be merged: cancel the test and report failure on GitHub.
            progress = TestProgress(test.id, TestStatus.canceled, "Commit could not be merged", datetime.datetime.now())
            db.add(progress)
            db.commit()
            try:
                with app.app_context():
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled due to merge conflict",
                        context=f"CI - {test.platform.value}",
                        target_url=url_for('test.by_id', test_id=test.id, _external=True)
                    )
            except ApiError as a:
                log.error(f'Got an exception while posting to GitHub! Message: {a.message}')
            return
        # Merge on master if no conflict
        repo.git.merge('master')
    else:
        test_branch = repo.create_head(ci_branch, origin.refs.master)
        test_branch.checkout(True)
        try:
            repo.head.reset(test.commit, working_tree=True)
        except GitCommandError:
            log.warn(f"[{platform}] Commit {test.commit} for test {test.id} does not exist!")
            return
    # Power on machine
    try:
        vm.create()
        db.add(status)
        db.commit()
    except libvirt.libvirtError as e:
        log.critical(f"[{platform}] Failed to launch VM {kvm_name}")
        log.critical(f"Information about failure: code: {e.get_error_code()}, domain: {e.get_error_domain()}, "
                     f"level: {e.get_error_level()}, message: {e.get_error_message()}")
    except IntegrityError:
        log.warn(f"[{platform}] Duplicate entry for {test.id}")
    # Close connection to libvirt
    conn.close()
def save_xml_to_file(xml_node, folder_name, file_name) -> None:
    """
    Write the tree containing the given XML node to a file inside *folder_name*.

    :param xml_node: The XML content element to write to the file.
    :type xml_node: Element
    :param folder_name: The folder name.
    :type folder_name: str
    :param file_name: The name of the file
    :type file_name: str
    :return: Nothing
    :rtype: None
    """
    destination = os.path.join(folder_name, file_name)
    tree = xml_node.getroottree()
    tree.write(destination, encoding='utf-8', xml_declaration=True, pretty_print=True)
def queue_test(db, gh_commit, commit, test_type, branch="master", pr_nr=0) -> None:
    """
    Create a Test row per platform and post a 'pending' commit status to GitHub.

    :param db: Database connection.
    :type db: sqlalchemy.orm.scoped_session
    :param gh_commit: The GitHub API call for the commit. Can be None
    :type gh_commit: Any
    :param commit: The commit hash.
    :type commit: str
    :param test_type: The type of test
    :type test_type: TestType
    :param branch: Branch name
    :type branch: str
    :param pr_nr: Pull Request number, if applicable.
    :type pr_nr: int
    :return: Nothing
    :rtype: None
    """
    from run import log

    fork_url = f"%/{g.github['repository_owner']}/{g.github['repository']}.git"
    fork = Fork.query.filter(Fork.github.like(fork_url)).first()
    if test_type == TestType.pull_request:
        log.debug('pull request test type detected')
        branch = "pull_request"
    # One test per platform, created in a fixed order (linux first).
    created_tests = []
    for test_platform in (TestPlatform.linux, TestPlatform.windows):
        new_test = Test(test_platform, test_type, fork.id, branch, commit, pr_nr)
        db.add(new_test)
        created_tests.append(new_test)
    db.commit()
    for new_test in created_tests:
        add_customized_regression_tests(new_test.id)
    if gh_commit is not None:
        for new_test in created_tests:
            try:
                gh_commit.post(
                    state=Status.PENDING,
                    description="Tests queued",
                    context=f"CI - {new_test.platform.value}",
                    target_url=url_for('test.by_id', test_id=new_test.id, _external=True)
                )
            except ApiError as a:
                log.critical(f'Could not post to GitHub! Response: {a.response}')
    log.debug("Created tests, waiting for cron...")
def inform_mailing_list(mailer, id, title, author, body) -> None:
    """
    Send mail to subscribed users when a issue is opened via the Webhook.

    :param mailer: The mailer instance
    :type mailer: Mailer
    :param id: ID of the Issue Opened
    :type id: int
    :param title: Title of the Created Issue
    :type title: str
    :param author: The Authors Username of the Issue
    :type author: str
    :param body: The Content of the Issue
    :type body: str
    """
    from run import get_github_issue_link

    issue_url = get_github_issue_link(id)
    message = {
        "to": "ccextractor-dev@googlegroups.com",
        "subject": f"GitHub Issue #{id}",
        "html": get_html_issue_body(title=title, author=author, body=body, issue_number=id, url=issue_url)
    }
    if not mailer.send_simple_message(message):
        g.log.error('failed to send issue to mailing list')
def get_html_issue_body(title, author, body, issue_number, url) -> Any:
    """
    Curate a HTML formatted body for the issue mail.

    :param title: title of the issue
    :type title: str
    :param author: author of the issue
    :type author: str
    :param body: content of the issue
    :type body: str
    :param issue_number: issue number (currently unused by the template call)
    :type issue_number: int
    :param url: link to the issue
    :type url: str
    :return: email body in html format
    :rtype: str
    """
    from run import app

    rendered_body = markdown(body, extras=["target-blank-links", "task_list", "code-friendly"])
    template = app.jinja_env.get_or_select_template("email/new_issue.txt")
    return template.render(title=title, author=author, body=rendered_body, url=url)
@mod_ci.route('/start-ci', methods=['GET', 'POST'])
@request_from_github()
def start_ci():
    """
    Perform various actions when the GitHub webhook is triggered.

    Reaction to the next events need to be processed
    (after verification):
        - Ping (for fun)
        - Push
        - Pull Request
        - Issues
        - Release
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            g.log.debug('server ping successful')
            return json.dumps({'msg': 'Hi!'})

        # Reject requests whose HMAC signature doesn't match our CI secret.
        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data, g.github['ci_key']):
            g.log.warning(f'CI signature failed: {x_hub_signature}')
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning(f'CI payload is empty')
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(g.github['repository'])

        if event == "push":
            g.log.debug('push event detected')
            if 'after' in payload:
                commit_hash = payload['after']
                github_status = repository.statuses(commit_hash)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        # Seed the per-platform fetch pointer from the previous last commit.
                        prev_commit = GeneralData(commit_name, last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, github_status, commit_hash, TestType.commit)
            else:
                g.log.warning('Unknown push type! Dumping payload for analysis')
                g.log.warning(payload)
        elif event == "pull_request":
            g.log.debug('Pull Request event detected')
            # If it's a valid PR, run the tests
            pr_nr = payload['pull_request']['number']
            if payload['action'] in ['opened', 'synchronize', 'reopened']:
                try:
                    commit_hash = payload['pull_request']['head']['sha']
                    github_status = repository.statuses(commit_hash)
                except KeyError:
                    g.log.error("Didn't find a SHA value for a newly opened PR!")
                    g.log.error(payload)
                    return 'ERROR'
                # Check if user blacklisted
                user_id = payload['pull_request']['user']['id']
                if BlockedUsers.query.filter(BlockedUsers.user_id == user_id).first() is not None:
                    g.log.warning("User Blacklisted")
                    github_status.post(
                        state=Status.ERROR,
                        description="CI start aborted. You may be blocked from accessing this functionality",
                        target_url=url_for('home.index', _external=True)
                    )
                    return 'ERROR'
                queue_test(g.db, github_status, commit_hash, TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add cancelled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled, "PR closed", datetime.datetime.now())
                    g.db.add(progress)
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context=f"CI - {test.platform.value}",
                        target_url=url_for('test.by_id', test_id=test.id, _external=True)
                    )
        elif event == "issues":
            g.log.debug('issues event detected')
            issue_data = payload['issue']
            issue_action = payload['action']
            issue = Issue.query.filter(Issue.issue_id == issue_data['number']).first()
            issue_title = issue_data['title']
            issue_id = issue_data['number']
            issue_author = issue_data['user']['login']
            issue_body = issue_data['body']
            if issue_action == "opened":
                inform_mailing_list(g.mailer, issue_id, issue_title, issue_author, issue_body)
            # Keep the local mirror of the issue in sync with GitHub.
            if issue is not None:
                issue.title = issue_title
                issue.status = issue_data['state']
                g.db.commit()
        elif event == "release":
            g.log.debug("Release webhook triggered")
            release_data = payload['release']
            action = payload['action']
            release_version = release_data['tag_name']
            # Tags are usually "vX.Y"; strip the leading 'v'. startswith also
            # guards against an empty tag name (original indexed [0] unchecked).
            if release_version.startswith('v'):
                release_version = release_version[1:]
            if action == "prereleased":
                g.log.debug("Ignoring event meant for pre-release")
            elif action in ["deleted", "unpublished"]:
                g.log.debug("Received delete/unpublished action")
                CCExtractorVersion.query.filter_by(version=release_version).delete()
                g.db.commit()
                g.log.info(f"Successfully deleted release {release_version} on {action} action")
            elif action in ["edited", "published"]:
                g.log.debug(f"Latest release version is {release_version}")
                release_commit = GeneralData.query.filter(GeneralData.key == 'last_commit').first().value
                release_date = release_data['published_at']
                if action == "edited":
                    release = CCExtractorVersion.query.filter(CCExtractorVersion.version == release_version).one()
                    release.released = datetime.datetime.strptime(release_date, '%Y-%m-%dT%H:%M:%SZ').date()
                    release.commit = release_commit
                else:
                    release = CCExtractorVersion(release_version, release_date, release_commit)
                    g.db.add(release)
                g.db.commit()
                g.log.info(f"Successfully updated release version with webhook action '{action}'")
                # adding test corresponding to last commit to the baseline regression results
                # this is not altered when a release is deleted or unpublished since it's based on commit
                test = Test.query.filter(and_(Test.commit == release_commit,
                                              Test.platform == TestPlatform.linux)).first()
                test_result_file = g.db.query(TestResultFile).filter(TestResultFile.test_id == test.id).subquery()
                test_result = g.db.query(TestResult).filter(TestResult.test_id == test.id).subquery()
                # BUGFIX: the original used "test_result_file.c.got is not None",
                # a Python identity test on a SQLAlchemy column that is always
                # True. The SQL-level NULL check is .isnot(None), as used
                # elsewhere in this module.
                g.db.query(RegressionTestOutput.correct).filter(
                    and_(RegressionTestOutput.regression_id == test_result_file.c.regression_test_id,
                         test_result_file.c.got.isnot(None))).values(test_result_file.c.got)
                g.db.query(RegressionTest.expected_rc).filter(
                    RegressionTest.id == test_result.c.regression_test_id
                ).values(test_result.c.expected_rc)
                g.db.commit()
                g.log.info("Successfully added tests for latest release!")
            else:
                g.log.warning(f"Unsupported release action: {action}")
        else:
            g.log.warning(f'CI unrecognized event: {event}')

        return json.dumps({'msg': 'EOL'})
def update_build_badge(status, test) -> None:
    """
    Build status badge for current test to be displayed on sample-platform.

    Only commit tests on the main repository update the badge; PR tests and
    fork tests are ignored.

    :param status: current testing status
    :type status: str
    :param test: current commit that is tested
    :type test: Test
    :return: None
    :rtype: None
    """
    if test.test_type == TestType.commit and is_main_repo(test.fork.github):
        parent_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        # BUGFIX: enum members expose the platform name via `.value` (as used
        # throughout this module, e.g. in status contexts); the original used
        # `.values`, which is not the member's string and would corrupt the
        # generated file paths.
        original_location = os.path.join(parent_dir, 'static', 'svg', f'{status.upper()}-{test.platform.value}.svg')
        build_status_location = os.path.join(parent_dir, 'static', 'img', 'status', f'build-{test.platform.value}.svg')
        shutil.copyfile(original_location, build_status_location)
        g.log.info('Build badge updated successfully!')
@mod_ci.route('/progress-reporter/<test_id>/<token>', methods=['POST'])
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token. If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log

    test = Test.query.filter(Test.id == test_id).first()
    # Reject unknown tests and requests carrying the wrong token.
    if test is None or test.token != token:
        return "FAIL"

    repo_folder = config.get('SAMPLE_REPOSITORY', '')
    report_type = request.form.get('type')
    if report_type is None:
        return "OK"

    # Dispatch on the report type sent by the runner.
    if report_type == 'progress':
        log.info('[PROGRESS_REPORTER] Progress reported')
        if not progress_type_request(log, test, test_id, request):
            return "FAIL"
    elif report_type == 'equality':
        log.info('[PROGRESS_REPORTER] Equality reported')
        equality_type_request(log, test_id, test, request)
    elif report_type == 'logupload':
        log.info('[PROGRESS_REPORTER] Log upload')
        if not upload_log_type_request(log, test_id, repo_folder, test, request):
            return "EMPTY"
    elif report_type == 'upload':
        log.info('[PROGRESS_REPORTER] File upload')
        if not upload_type_request(log, test_id, repo_folder, test, request):
            return "EMPTY"
    elif report_type == 'finish':
        log.info('[PROGRESS_REPORTER] Test finished')
        finish_type_request(log, test_id, test, request)
    else:
        return "FAIL"
    return "OK"
def progress_type_request(log, test, test_id, request) -> bool:
    """
    Handle progress updates for progress reporter.

    Records the new progress entry, maintains KVM timing statistics and the
    per-platform average run time, and posts a commit status to GitHub.

    :param log: logger
    :type log: Logger
    :param test: concerned test
    :type test: Test
    :param test_id: The id of the test to update.
    :type test_id: int
    :param request: Request parameters
    :type request: Request
    :return: False when the update arrives after the test already finished,
        True otherwise.
    :rtype: bool
    """
    status = TestStatus.from_string(request.form['status'])
    # progress_step maps a status to its ordinal stage so stages can be compared.
    current_status = TestStatus.progress_step(status)
    message = request.form['message']

    if len(test.progress) != 0:
        last_status = TestStatus.progress_step(test.progress[-1].status)

        # Ignore reports arriving after the test already finished.
        if last_status in [TestStatus.completed, TestStatus.canceled]:
            return False

        # A stage going backwards means a duplicate/out-of-order report.
        if last_status > current_status:
            status = TestStatus.canceled  # type: ignore
            message = "Duplicate Entries"

        if last_status < current_status:
            # get KVM start time for finding KVM preparation time
            # NOTE(review): kvm_entry may be None if no Kvm row exists for this
            # test; the assignments below would then raise — confirm upstream
            # guarantees a row was created.
            kvm_entry = Kvm.query.filter(Kvm.test_id == test_id).first()
            if status == TestStatus.building:
                log.info('test preparation finished')
                prep_finish_time = datetime.datetime.now()
                # save preparation finish time
                kvm_entry.timestamp_prep_finished = prep_finish_time
                g.db.commit()
                # set time taken in seconds to do preparation
                time_diff = (prep_finish_time - kvm_entry.timestamp).total_seconds()
                set_avg_time(test.platform, "prep", time_diff)
            elif status == TestStatus.testing:
                log.info('test build procedure finished')
                build_finish_time = datetime.datetime.now()
                # save build finish time
                kvm_entry.timestamp_build_finished = build_finish_time
                g.db.commit()
                # set time taken in seconds to do the build (prep -> build finish)
                time_diff = (build_finish_time - kvm_entry.timestamp_prep_finished).total_seconds()
                set_avg_time(test.platform, "build", time_diff)

    progress = TestProgress(test.id, status, message)
    g.db.add(progress)
    g.db.commit()

    gh = GitHub(access_token=g.github['bot_token'])
    repository = gh.repos(g.github['repository_owner'])(g.github['repository'])

    # Store the test commit for testing in case of commit
    if status == TestStatus.completed and is_main_repo(test.fork.github):
        commit_name = 'fetch_commit_' + test.platform.value
        commit = GeneralData.query.filter(GeneralData.key == commit_name).first()
        fetch_commit = Test.query.filter(
            and_(Test.commit == commit.value, Test.platform == test.platform)
        ).first()
        # Only advance the fetch pointer forward (never to an older test).
        if test.test_type == TestType.commit and test.id > fetch_commit.id:
            commit.value = test.commit
            g.db.commit()

    # If status is complete, remove the Kvm entry
    if status in [TestStatus.completed, TestStatus.canceled]:
        log.debug(f"Test {test_id} has been {status}")
        var_average = 'average_time_' + test.platform.value
        current_average = GeneralData.query.filter(GeneralData.key == var_average).first()
        average_time = 0
        total_time = 0
        if current_average is None:
            # No stored average yet: compute one from the full history of
            # finished tests on this platform.
            platform_tests = g.db.query(Test.id).filter(Test.platform == test.platform).subquery()
            finished_tests = g.db.query(TestProgress.test_id).filter(
                and_(
                    TestProgress.status.in_([TestStatus.canceled, TestStatus.completed]),
                    TestProgress.test_id.in_(platform_tests)
                )
            ).subquery()
            in_progress_statuses = [TestStatus.preparation, TestStatus.completed, TestStatus.canceled]
            finished_tests_progress = g.db.query(TestProgress).filter(
                and_(
                    TestProgress.test_id.in_(finished_tests),
                    TestProgress.status.in_(in_progress_statuses)
                )
            ).subquery()
            # group_concat yields a comma-separated timestamp list per test;
            # first element is the start, last is the finish.
            times = g.db.query(
                finished_tests_progress.c.test_id,
                label('time', func.group_concat(finished_tests_progress.c.timestamp))
            ).group_by(finished_tests_progress.c.test_id).all()
            for p in times:
                parts = p.time.split(',')
                start = datetime.datetime.strptime(parts[0], '%Y-%m-%d %H:%M:%S')
                end = datetime.datetime.strptime(parts[-1], '%Y-%m-%d %H:%M:%S')
                total_time += int((end - start).total_seconds())
            if len(times) != 0:
                average_time = total_time // len(times)
            new_avg = GeneralData(var_average, average_time)
            log.info(f'new average time {str(average_time)} set successfully')
            g.db.add(new_avg)
            g.db.commit()
        else:
            # Incrementally fold the just-finished test's duration into the
            # stored running average.
            # NOTE(review): number_test is derived from result counts, not a
            # direct test count — presumably a proxy for "tests run so far";
            # confirm this matches the intended weighting.
            all_results = TestResult.query.count()
            regression_test_count = RegressionTest.query.count()
            number_test = all_results / regression_test_count
            updated_average = float(current_average.value) * (number_test - 1)
            pr = test.progress_data()
            end_time = pr['end']
            start_time = pr['start']
            # Normalize to naive datetimes so the subtraction cannot mix
            # aware and naive values.
            if end_time.tzinfo is not None:
                end_time = end_time.replace(tzinfo=None)
            if start_time.tzinfo is not None:
                start_time = start_time.replace(tzinfo=None)
            last_running_test = end_time - start_time
            updated_average = updated_average + last_running_test.total_seconds()
            current_average.value = updated_average // number_test
            g.db.commit()

        # The VM is no longer needed once the test finished.
        kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
        if kvm is not None:
            log.debug("Removing KVM entry")
            g.db.delete(kvm)
            g.db.commit()

    # Post status update
    state = Status.PENDING
    target_url = url_for('test.by_id', test_id=test.id, _external=True)
    context = f"CI - {test.platform.value}"
    if status == TestStatus.canceled:
        state = Status.ERROR
        message = 'Tests aborted due to an error; please check'
    elif status == TestStatus.completed:
        # Determine if success or failure
        # It fails if any of these happen:
        # - A crash (unexpected exit code)
        # - A not None value on the "got" of a TestResultFile (
        #   meaning the hashes do not match)
        crashes = g.db.query(count(TestResult.exit_code)).filter(
            and_(
                TestResult.test_id == test.id,
                TestResult.exit_code != TestResult.expected_rc
            )).scalar()
        results_zero_rc = g.db.query(RegressionTest.id).filter(
            RegressionTest.expected_rc == 0
        ).subquery()
        results = g.db.query(count(TestResultFile.got)).filter(
            and_(
                TestResultFile.test_id == test.id,
                TestResultFile.regression_test_id.in_(results_zero_rc),
                TestResultFile.got.isnot(None)
            )
        ).scalar()
        log.debug(f'Test {test.id} completed: {crashes} crashes, {results} results')
        if crashes > 0 or results > 0:
            state = Status.FAILURE
            message = 'Not all tests completed successfully, please check'
        else:
            state = Status.SUCCESS
            message = 'Tests completed'
        if test.test_type == TestType.pull_request:
            comment_pr(test.id, state, test.pr_nr, test.platform.name)
        update_build_badge(state, test)
    else:
        message = progress.message

    gh_commit = repository.statuses(test.commit)
    try:
        gh_commit.post(state=state, description=message, context=context, target_url=target_url)
    except ApiError as a:
        log.error(f'Got an exception while posting to GitHub! Message: {a.message}')

    if status in [TestStatus.completed, TestStatus.canceled]:
        # Start next test if necessary, on the same platform
        start_platforms(g.db, repository, 60, test.platform)

    return True
def equality_type_request(log, test_id, test, request):
    """
    Handle equality request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug(f'Equality for {test_id}/{request.form["test_id"]}/{request.form["test_file_id"]}')
    rto = RegressionTestOutput.query.filter(RegressionTestOutput.id == request.form['test_file_id']).first()
    if rto is not None:
        # The runner reported a hash match, so record the expected file as the result.
        g.db.add(TestResultFile(test.id, request.form['test_id'], rto.id, rto.correct))
        g.db.commit()
    else:
        # Equality posted on a file that's ignored presumably
        log.info(f'No rto for {test_id}: {request.form["test_id"]}')
def upload_log_type_request(log, test_id, repo_folder, test, request) -> bool:
    """
    Handle logupload request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug(f"Received log file for test {test_id}")
    # Bail out early when no file was attached to the request.
    if 'file' not in request.files:
        return False
    uploaded_file = request.files['file']
    filename = secure_filename(uploaded_file.filename)
    if filename == '':
        return False
    # Save to a temporary location first, then move into place under the
    # canonical "<test id>.txt" name.
    temp_path = os.path.join(repo_folder, 'TempFiles', filename)
    uploaded_file.save(temp_path)
    final_path = os.path.join(repo_folder, 'LogFiles', f"{test.id}.txt")
    os.rename(temp_path, final_path)
    log.debug("Stored log file")
    return True
def upload_type_request(log, test_id, repo_folder, test, request) -> bool:
    """
    Handle upload request type for progress reporter.

    Stores the uploaded result file under its SHA-256 hash and records a
    TestResultFile entry for it.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param repo_folder: repository folder
    :type repo_folder: str
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug(f'Upload for {test_id}/{request.form["test_id"]}/{request.form["test_file_id"]}'
              )
    # File upload, process
    if 'file' in request.files:
        uploaded_file = request.files['file']
        filename = secure_filename(uploaded_file.filename)
        if filename == '':
            log.warning('empty filename provided for uploading')
            return False
        temp_path = os.path.join(repo_folder, 'TempFiles', filename)
        # Save to temporary location
        uploaded_file.save(temp_path)
        # Get hash and check if it's already been submitted
        hash_sha256 = hashlib.sha256()
        with open(temp_path, "rb") as f:
            for chunk in iter(lambda: f.read(4096), b""):
                hash_sha256.update(chunk)
        file_hash = hash_sha256.hexdigest()
        filename, file_extension = os.path.splitext(filename)
        final_path = os.path.join(
            repo_folder, 'TestResults', f'{file_hash}{file_extension}'
        )
        os.rename(temp_path, final_path)
        rto = RegressionTestOutput.query.filter(
            RegressionTestOutput.id == request.form['test_file_id']).first()
        # BUGFIX: guard against an unknown output file id. The original
        # dereferenced rto unconditionally and would raise AttributeError;
        # equality_type_request already handles this case gracefully.
        if rto is None:
            log.info(f'No rto for {test_id}: {request.form["test_id"]}')
            return False
        result_file = TestResultFile(test.id, request.form['test_id'], rto.id, rto.correct, file_hash)
        g.db.add(result_file)
        g.db.commit()
        return True
    return False
def finish_type_request(log, test_id, test, request):
    """
    Handle finish request type for progress reporter.

    :param log: logger
    :type log: Logger
    :param test_id: The id of the test to update.
    :type test_id: int
    :param test: concerned test
    :type test: Test
    :param request: Request parameters
    :type request: Request
    """
    log.debug(f"Finish for {test_id}/{request.form['test_id']}")
    rt = RegressionTest.query.filter(RegressionTest.id == request.form['test_id']).first()
    # Record runtime and exit code next to the expected return code.
    g.db.add(TestResult(
        test.id, rt.id, request.form['runTime'],
        request.form['exitCode'], rt.expected_rc
    ))
    try:
        g.db.commit()
    except IntegrityError as e:
        # A duplicate result for this (test, regression test) pair.
        log.error(f"Could not save the results: {e}")
def set_avg_time(platform, process_type: str, time_taken: int) -> None:
    """
    Set average platform preparation time.

    Maintains a running average (and sample count) per platform/process pair
    in the GeneralData table.

    :param platform: platform to which the average time belongs
    :type platform: TestPlatform
    :param process_type: process to save the average time for
    :type process_type: str
    :param time_taken: time taken to complete the process
    :type time_taken: int
    """
    val_key = "avg_" + str(process_type) + "_time_" + platform.value
    count_key = "avg_" + str(process_type) + "_count_" + platform.value

    sample_count = GeneralData.query.filter(GeneralData.key == count_key).first()
    if sample_count is not None:
        # Fold the new sample into the stored running average.
        avg_entry = GeneralData.query.filter(GeneralData.key == val_key).first()
        n = int(sample_count.value)
        old_avg = int(float(avg_entry.value))
        new_avg = ((old_avg * n) + time_taken) / (n + 1)
        sample_count.value = str(n + 1)
        avg_entry.value = str(new_avg)
    else:
        # adding average data the first time
        g.db.add(GeneralData(count_key, str(1)))
        g.db.add(GeneralData(val_key, str(time_taken)))
    g.db.commit()
def comment_pr(test_id, state, pr_nr, platform) -> None:
    """
    Upload the test report to the GitHub PR as comment.

    Builds per-category pass/total counts plus the list of failed regression
    tests, renders the PR-comment template, and creates or updates the bot's
    comment on the pull request.

    :param test_id: The identity of Test whose report will be uploaded
    :type test_id: str
    :param state: The state of the PR.
    :type state: Status
    :param pr_nr: PR number to which test commit is related and comment will be uploaded
    :type pr_nr: str
    :param platform: name of the platform the test ran on
    :type platform: str
    """
    from run import app, log
    # Regression tests counted as passed: the exit code matched, and either a
    # non-zero exit code was expected, or the output hash matched (got is NULL),
    # or the output is marked as ignored.
    # NOTE(review): RegressionTestOutput appears in the filter without an
    # explicit join clause — presumably SQLAlchemy resolves it via the
    # and_() condition; confirm the generated SQL joins as intended.
    regression_testid_passed = g.db.query(TestResult.regression_test_id).outerjoin(
        TestResultFile, TestResult.test_id == TestResultFile.test_id).filter(
        TestResult.test_id == test_id,
        TestResult.expected_rc == TestResult.exit_code,
        or_(
            TestResult.exit_code != 0,
            and_(TestResult.exit_code == 0,
                 TestResult.regression_test_id == TestResultFile.regression_test_id,
                 TestResultFile.got.is_(None)
                 ),
            and_(
                RegressionTestOutput.regression_id == TestResult.regression_test_id,
                RegressionTestOutput.ignore.is_(True),
            ))).subquery()
    # Per-category count of passed regression tests.
    passed = g.db.query(label('category_id', Category.id), label(
        'success', count(regressionTestLinkTable.c.regression_id))).filter(
        regressionTestLinkTable.c.regression_id.in_(regression_testid_passed),
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).subquery()
    # Per-category totals joined with the pass counts (success is NULL when
    # nothing in the category passed, courtesy of the outer join).
    tot = g.db.query(label('category', Category.name), label('total', count(regressionTestLinkTable.c.regression_id)),
                     label('success', passed.c.success)).outerjoin(
        passed, passed.c.category_id == Category.id).filter(
        Category.id == regressionTestLinkTable.c.category_id).group_by(
        regressionTestLinkTable.c.category_id).all()
    regression_testid_failed = RegressionTest.query.filter(RegressionTest.id.notin_(regression_testid_passed)).all()
    template = app.jinja_env.get_or_select_template('ci/pr_comment.txt')
    message = template.render(tests=tot, failed_tests=regression_testid_failed, test_id=test_id,
                              state=state, platform=platform)
    log.debug(f"GitHub PR Comment Message Created for Test_id: {test_id}")
    try:
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
        # Pull requests are just issues with code, so GitHub considers PR comments in issues
        pull_request = repository.issues(pr_nr)
        comments = pull_request.comments().get()
        bot_name = g.github['bot_name']
        # Look for an existing bot comment for this platform so we update it
        # instead of posting a new one.
        comment_id = None
        for comment in comments:
            if comment['user']['login'] == bot_name and platform in comment['body']:
                comment_id = comment['id']
                break
        log.debug(f"GitHub PR Comment ID Fetched for Test_id: {test_id}")
        if comment_id is None:
            comment = pull_request.comments().post(body=message)
            comment_id = comment['id']
        else:
            repository.issues().comments(comment_id).post(body=message)
        log.debug(f"GitHub PR Comment ID {comment_id} Uploaded for Test_id: {test_id}")
    except Exception as e:
        log.error(f"GitHub PR Comment Failed for Test_id: {test_id} with Exception {e}")
@mod_ci.route('/show_maintenance')
@login_required
@check_access_rights([Role.admin])
@template_renderer('ci/maintenance.html')
def show_maintenance():
    """
    Get list of Virtual Machines under maintenance.

    :return: platforms in maintenance
    :rtype: dict
    """
    maintenance_entries = MaintenanceMode.query.all()
    return {'platforms': maintenance_entries}
@mod_ci.route('/blocked_users', methods=['GET', 'POST'])
@login_required
@check_access_rights([Role.admin])
@template_renderer()
def blocked_users():
    """
    Render the blocked_users template.

    This returns a list of all currently blacklisted users.
    Also defines processing of forms to add/remove users from blacklist.
    When a user is added to blacklist, removes queued tests on any PR by the user.
    """
    blocked_users = BlockedUsers.query.order_by(BlockedUsers.user_id)

    # Initialize usernames dictionary
    usernames = {u.user_id: "Error, cannot get username" for u in blocked_users}
    for key in usernames.keys():
        # Fetch usernames from GitHub API
        try:
            api_url = requests.get(f"https://api.github.com/user/{key}", timeout=10)
            userdata = api_url.json()
            # Set values to the actual usernames if no errors
            usernames[key] = userdata['login']
        except requests.exceptions.RequestException:
            # NOTE(review): a single failed lookup stops resolving all remaining
            # usernames (break, not continue) — presumably to avoid stacking
            # 10 s timeouts when GitHub is unreachable; confirm intent.
            break

    # Define addUserForm processing
    add_user_form = AddUsersToBlacklist()
    if add_user_form.add.data and add_user_form.validate_on_submit():
        if BlockedUsers.query.filter_by(user_id=add_user_form.user_id.data).first() is not None:
            flash('User already blocked.')
            return redirect(url_for('.blocked_users'))
        blocked_user = BlockedUsers(add_user_form.user_id.data, add_user_form.comment.data)
        g.db.add(blocked_user)
        g.db.commit()
        flash('User blocked successfully.')
        try:
            # Remove any queued pull request from blocked user
            gh = GitHub(access_token=g.github['bot_token'])
            repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
            # Getting all pull requests by blocked user on the repo
            pulls = repository.pulls.get()
            for pull in pulls:
                if pull['user']['id'] != add_user_form.user_id.data:
                    continue
                tests = Test.query.filter(Test.pr_nr == pull['number']).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled, "PR closed", datetime.datetime.now())
                    g.db.add(progress)
                    g.db.commit()
                    try:
                        repository.statuses(test.commit).post(
                            state=Status.FAILURE,
                            description="Tests canceled since user blacklisted",
                            context=f"CI - {test.platform.value}",
                            target_url=url_for('test.by_id', test_id=test.id, _external=True)
                        )
                    except ApiError as a:
                        g.log.error(f"Got an exception while posting to GitHub! Message: {a.message}")
        except ApiError as a:
            g.log.error(f"Pull Requests of Blocked User could not be fetched: {a.response}")
        return redirect(url_for('.blocked_users'))

    # Define removeUserForm processing
    remove_user_form = RemoveUsersFromBlacklist()
    if remove_user_form.remove.data and remove_user_form.validate_on_submit():
        blocked_user = BlockedUsers.query.filter_by(user_id=remove_user_form.user_id.data).first()
        if blocked_user is None:
            flash("No such user in Blacklist")
            return redirect(url_for('.blocked_users'))
        g.db.delete(blocked_user)
        g.db.commit()
        flash("User removed successfully.")
        return redirect(url_for('.blocked_users'))

    return {
        'addUserForm': add_user_form,
        'removeUserForm': remove_user_form,
        'blocked_users': blocked_users,
        'usernames': usernames
    }
@mod_ci.route('/toggle_maintenance/<platform>/<status>')
@login_required
@check_access_rights([Role.admin])
def toggle_maintenance(platform, status):
    """
    Toggle maintenance mode for a platform.

    :param platform: name of the platform
    :type platform: str
    :param status: current maintenance status
    :type status: str
    :return: success response if successful, failure response otherwise
    :rtype: JSON
    """
    disabled = status == 'True'
    failure = {'status': 'failed', 'message': 'Platform Not found'}
    try:
        platform = TestPlatform.from_string(platform)
    except ValueError:
        # Unknown platform name in the URL.
        return jsonify(failure)
    db_mode = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
    if db_mode is None:
        return jsonify(failure)
    db_mode.disabled = disabled
    g.db.commit()
    return jsonify({
        'status': 'success',
        'message': f'{platform.description} in maintenance? {"Yes" if disabled else "No"}'
    })
@mod_ci.route('/maintenance-mode/<platform>')
def in_maintenance_mode(platform):
    """
    Check if platform in maintenance mode.

    :param platform: name of the platform
    :type platform: str
    :return: status of the platform
    :rtype: str
    """
    try:
        platform = TestPlatform.from_string(platform)
    except ValueError:
        return 'ERROR'
    mode = MaintenanceMode.query.filter(MaintenanceMode.platform == platform).first()
    if mode is None:
        # First query for this platform: create a default (not disabled) row.
        mode = MaintenanceMode(platform, False)
        g.db.add(mode)
        g.db.commit()
    return str(mode.disabled)
def is_main_repo(repo_url) -> bool:
    """
    Check whether a repo_url links to the main repository or not.

    :param repo_url: url of fork/main repository of the user
    :type repo_url: str
    :return: checks whether url of main repo is same or not
    :rtype: bool
    """
    # Imported locally to avoid a circular import with run.py.
    from run import config, get_github_config
    gh_config = get_github_config(config)
    main_repo_slug = f'{gh_config["repository_owner"]}/{gh_config["repository"]}'
    return main_repo_slug in repo_url
def add_customized_regression_tests(test_id) -> None:
    """
    Run custom regression tests.

    Links every active regression test to the given test via CustomizedTest rows.

    :param test_id: id of the test
    :type test_id: int
    """
    for rt in RegressionTest.query.filter(RegressionTest.active == 1).all():
        g.log.debug(f'Adding RT #{rt.id} to test {test_id}')
        g.db.add(CustomizedTest(test_id, rt.id))
        g.db.commit()
|
sensordata.py | #!/usr/bin/env python3
#
# sensordata.py
#
# Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# import useful stuff
#
import boto3
import datetime
import json
import logging
import random
import sys
import threading
import time
#
# globals
#
TOPIC_BASE = 'dt/sensor'
C_IOT_DATA = boto3.client('iot-data', region_name='us-east-1')
SENSORS = {
'sensor_01': {'building': 'Day 1', 'room': '2.01'},
'sensor_02': {'building': 'Day 1', 'room': '10.01'},
'sensor_03': {'building': 'Day 1', 'room': '11.02'},
'sensor_04': {'building': 'Kumo', 'room': '12.12'},
'sensor_05': {'building': 'Kumo', 'room': '15.07'},
'sensor_06': {'building': 'Kumo', 'room': '00.22'},
'sensor_07': {'building': 'Doppler', 'room': '14.10'},
'sensor_08': {'building': 'Doppler', 'room': '15.11'},
'sensor_09': {'building': 'Doppler', 'room': '16.12'},
'sensor_10': {'building': 'Doppler', 'room': '17.14'}
}
#
# Configure logging
#
# Module logger: INFO-level messages to stderr via a stream handler.
logger = logging.getLogger("AWSIoTPythonSDK.core")
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
# BUGFIX: the original format string opened a '[' it never closed; balanced
# with a trailing ']' (placement assumed — adjust if the '[' was the typo).
formatter = logging.Formatter("[%(asctime)s - %(levelname)s - %(filename)s:%(lineno)s - %(funcName)s - %(message)s]")
streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
def sensor_data():
    """Return one simulated reading: temperature, humidity and pressure."""
    return {
        'temperature': random.uniform(15, 35),
        'humidity': random.uniform(30, 70),
        'pressure': random.uniform(900, 1150),
    }
def send_sensor_data(sensor):
    """Publish a reading for *sensor* to its IoT topic every two seconds, forever."""
    # Topic and room metadata are fixed per sensor, so compute them once.
    topic = '{}/{}'.format(TOPIC_BASE, sensor)
    meta = SENSORS[sensor]
    while True:
        try:
            message = sensor_data()
            message['device_id'] = sensor
            message['building'] = meta['building']
            message['room'] = meta['room']
            logger.info("publish: topic: {} message: {}".format(topic, message))
            response = C_IOT_DATA.publish(topic=topic, qos=0, payload=json.dumps(message))
            logger.info("response: {}".format(response))
        except Exception as e:
            # Log and keep publishing; a transient AWS error must not kill the thread.
            logger.error("{}".format(e))
        time.sleep(2)
# Spawn one publisher thread per configured sensor; each loops forever.
for sensor in SENSORS.keys():
    logger.info("starting thread for sensor: {}".format(sensor))
    threading.Thread(target=send_sensor_data,args=(sensor,)).start()

start_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

# Main thread: heartbeat log of uptime and live threads every 30 seconds.
# NOTE(review): the publisher threads are non-daemon, so Ctrl+C will not stop
# them cleanly — confirm this is acceptable for the demo.
while True:
    logger.info("{}: start_time: {} now: {} threads:".format(__file__, start_time, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    for t in threading.enumerate():
        logger.info(" {}".format(t))
    time.sleep(30)
|
trainer_utils.py | # coding=utf-8
# Copyright 2020-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utilities for the Trainer and TFTrainer class. Should be independent from PyTorch and TensorFlow.
"""
import copy
import functools
import gc
import inspect
import os
import random
import re
import threading
import time
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
import numpy as np
from .utils import (
ExplicitEnum,
is_psutil_available,
is_tf_available,
is_torch_available,
is_torch_cuda_available,
is_torch_tpu_available,
requires_backends,
)
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
def seed_worker(_):
    """
    Helper function to set worker seed during Dataloader initialization.
    """
    # Derive a per-worker seed from torch's initial seed, truncated to 32 bits
    # because numpy/random only accept 32-bit seeds.
    set_seed(torch.initial_seed() % 2**32)
def enable_full_determinism(seed: int):
    """
    Helper function for reproducible behavior during distributed training. See
    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
    - https://www.tensorflow.org/api_docs/python/tf/config/experimental/enable_op_determinism for tensorflow
    """
    # set seed first
    set_seed(seed)
    if is_torch_available():
        # Enable PyTorch deterministic mode. This potentially requires either the environment
        # variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
        # depending on the CUDA version, so we set them both here
        os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
        os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
        torch.use_deterministic_algorithms(True)
        # Enable CUDNN deterministic mode (and disable the benchmark autotuner,
        # which may otherwise pick non-deterministic kernels).
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    if is_tf_available():
        tf.config.experimental.enable_op_determinism()
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in `random`, `numpy`, `torch` and/or `tf` (if installed).

    Args:
        seed (`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        # ^^ safe to call this function even if cuda is not available
    if is_tf_available():
        tf.random.set_seed(seed)
class EvalPrediction:
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (`np.ndarray`): Predictions of the model.
        label_ids (`np.ndarray`): Targets to be matched.
        inputs (`np.ndarray`, *optional*): Inputs the predictions were computed on.
    """

    def __init__(
        self,
        predictions: Union[np.ndarray, Tuple[np.ndarray]],
        label_ids: Union[np.ndarray, Tuple[np.ndarray]],
        inputs: Optional[Union[np.ndarray, Tuple[np.ndarray]]] = None,
    ):
        self.predictions = predictions
        self.label_ids = label_ids
        self.inputs = inputs

    def _as_tuple(self):
        """Return the stored fields as a tuple, omitting `inputs` when absent."""
        if self.inputs is None:
            return (self.predictions, self.label_ids)
        return (self.predictions, self.label_ids, self.inputs)

    def __iter__(self):
        return iter(self._as_tuple())

    def __getitem__(self, idx):
        # Valid indices are 0 and 1, plus 2 only when inputs were provided.
        if idx < 0 or idx > 2 or (idx == 2 and self.inputs is None):
            raise IndexError("tuple index out of range")
        return (self.predictions, self.label_ids, self.inputs)[idx]
class EvalLoopOutput(NamedTuple):
    """Container for the results of an evaluation loop: raw predictions, labels
    (if any), computed metrics and the number of samples seen."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    # None when the dataset carries no labels.
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
    num_samples: Optional[int]
class PredictionOutput(NamedTuple):
    """Container for the results of a prediction run: predictions, optional
    labels and optional metrics."""

    predictions: Union[np.ndarray, Tuple[np.ndarray]]
    label_ids: Optional[Union[np.ndarray, Tuple[np.ndarray]]]
    metrics: Optional[Dict[str, float]]
class TrainOutput(NamedTuple):
    """Container for the results of a training run."""

    global_step: int
    training_loss: float
    metrics: Dict[str, float]
PREFIX_CHECKPOINT_DIR = "checkpoint"
_re_checkpoint = re.compile(r"^" + PREFIX_CHECKPOINT_DIR + r"\-(\d+)$")


def get_last_checkpoint(folder):
    """Return the path of the `checkpoint-<step>` subfolder of `folder` with the
    highest step number, or None when no checkpoint folder exists."""
    candidates = []
    for entry in os.listdir(folder):
        match = _re_checkpoint.search(entry)
        # Only directories whose whole name matches `checkpoint-<digits>` count.
        if match is not None and os.path.isdir(os.path.join(folder, entry)):
            candidates.append((int(match.groups()[0]), entry))
    if not candidates:
        return
    return os.path.join(folder, max(candidates)[1])
class IntervalStrategy(ExplicitEnum):
    """How often an action (logging/evaluation/saving) is performed."""

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class EvaluationStrategy(ExplicitEnum):
    """Evaluation scheduling strategy.

    NOTE(review): mirrors `IntervalStrategy` value-for-value — presumably kept
    for backward compatibility; confirm before removing.
    """

    NO = "no"
    STEPS = "steps"
    EPOCH = "epoch"
class HubStrategy(ExplicitEnum):
    """When to push artifacts to the Hub during training."""

    END = "end"
    EVERY_SAVE = "every_save"
    CHECKPOINT = "checkpoint"
    ALL_CHECKPOINTS = "all_checkpoints"
class BestRun(NamedTuple):
    """
    The best run found by a hyperparameter search (see [`~Trainer.hyperparameter_search`]).

    Parameters:
        run_id (`str`):
            The id of the best run (if models were saved, the corresponding checkpoint will be in the folder ending
            with run-{run_id}).
        objective (`float`):
            The objective that was obtained for this run.
        hyperparameters (`Dict[str, Any]`):
            The hyperparameters picked to get this run.
    """

    run_id: str
    objective: float
    hyperparameters: Dict[str, Any]
def default_compute_objective(metrics: Dict[str, float]) -> float:
    """
    The default objective to maximize/minimize when doing a hyperparameter search. It is the evaluation loss if no
    metrics are provided to the [`Trainer`], the sum of all metrics otherwise.

    Args:
        metrics (`Dict[str, float]`): The metrics returned by the evaluate method.

    Return:
        `float`: The objective to minimize or maximize
    """
    # Work on a copy so the caller's dict is left untouched.
    metrics = copy.deepcopy(metrics)
    loss = metrics.pop("eval_loss", None)
    metrics.pop("epoch", None)
    # Drop speed metrics — they say nothing about model quality.
    for key in [name for name in metrics if name.endswith("_runtime") or name.endswith("_per_second")]:
        metrics.pop(key, None)
    return loss if not metrics else sum(metrics.values())
def default_hp_space_optuna(trial) -> Dict[str, float]:
    """Default Optuna search space: learning rate (log scale), epochs, seed and
    per-device batch size, sampled from the given `trial`."""
    from .integrations import is_optuna_available

    assert is_optuna_available(), "This function needs Optuna installed: `pip install optuna`"
    return {
        "learning_rate": trial.suggest_float("learning_rate", 1e-6, 1e-4, log=True),
        "num_train_epochs": trial.suggest_int("num_train_epochs", 1, 5),
        "seed": trial.suggest_int("seed", 1, 40),
        "per_device_train_batch_size": trial.suggest_categorical("per_device_train_batch_size", [4, 8, 16, 32, 64]),
    }
def default_hp_space_ray(trial) -> Dict[str, float]:
    """Default Ray Tune search space (the `trial` argument is unused by Ray's API)."""
    from .integrations import is_ray_tune_available

    assert is_ray_tune_available(), "This function needs ray installed: `pip " "install ray[tune]`"
    from ray import tune

    return {
        "learning_rate": tune.loguniform(1e-6, 1e-4),
        "num_train_epochs": tune.choice(list(range(1, 6))),
        "seed": tune.uniform(1, 40),
        "per_device_train_batch_size": tune.choice([4, 8, 16, 32, 64]),
    }
def default_hp_space_sigopt(trial):
    """Default SigOpt search space as a list of parameter definitions.

    The `trial` argument is unused; it is kept for signature parity with the
    other `default_hp_space_*` helpers.
    """
    return [
        # Fix: the key was misspelled "transformamtion", so SigOpt silently
        # ignored it and sampled the learning rate on a linear scale instead
        # of the intended log scale.
        {"bounds": {"min": 1e-6, "max": 1e-4}, "name": "learning_rate", "type": "double", "transformation": "log"},
        {"bounds": {"min": 1, "max": 6}, "name": "num_train_epochs", "type": "int"},
        {"bounds": {"min": 1, "max": 40}, "name": "seed", "type": "int"},
        {
            "categorical_values": ["4", "8", "16", "32", "64"],
            "name": "per_device_train_batch_size",
            "type": "categorical",
        },
    ]
def default_hp_space_wandb(trial) -> Dict[str, float]:
    """Default Weights & Biases sweep configuration (the `trial` argument is unused)."""
    from .integrations import is_wandb_available

    if not is_wandb_available():
        raise ImportError("This function needs wandb installed: `pip install wandb`")
    parameters = {
        "learning_rate": {"distribution": "uniform", "min": 1e-6, "max": 1e-4},
        "num_train_epochs": {"distribution": "int_uniform", "min": 1, "max": 6},
        "seed": {"distribution": "int_uniform", "min": 1, "max": 40},
        "per_device_train_batch_size": {"values": [4, 8, 16, 32, 64]},
    }
    return {
        "method": "random",
        "metric": {"name": "objective", "goal": "minimize"},
        "parameters": parameters,
    }
class HPSearchBackend(ExplicitEnum):
    """Supported hyperparameter-search backends."""

    OPTUNA = "optuna"
    RAY = "ray"
    SIGOPT = "sigopt"
    WANDB = "wandb"
# Dispatch table mapping each search backend to its default search-space builder.
default_hp_space = {
    HPSearchBackend.OPTUNA: default_hp_space_optuna,
    HPSearchBackend.RAY: default_hp_space_ray,
    HPSearchBackend.SIGOPT: default_hp_space_sigopt,
    HPSearchBackend.WANDB: default_hp_space_wandb,
}
def is_main_process(local_rank):
    """
    Whether or not the current process is the local process, based on `xm.get_ordinal()` (for TPUs) first, then on
    `local_rank`.
    """
    if not is_torch_tpu_available():
        # Without TPUs: rank -1 (no distributed setup) or rank 0 is the main process.
        return local_rank in [-1, 0]
    import torch_xla.core.xla_model as xm

    return xm.get_ordinal() == 0
def total_processes_number(local_rank):
    """
    Return the number of processes launched in parallel. Works with `torch.distributed` and TPUs.
    """
    if is_torch_tpu_available():
        import torch_xla.core.xla_model as xm

        return xm.xrt_world_size()
    if local_rank != -1 and is_torch_available():
        import torch

        return torch.distributed.get_world_size()
    # Single-process run.
    return 1
def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    """
    Measure and return speed performance metrics.

    This function requires a time snapshot `start_time` before the operation to be measured starts and this function
    should be run immediately after the operation to be measured has completed.

    Args:
    - split: name to prefix metric (like train, eval, test...)
    - start_time: operation start time
    - num_samples: number of samples processed
    - num_steps: number of steps processed

    Returns a dict with `<split>_runtime` and, when the corresponding counts are
    given, `<split>_samples_per_second` / `<split>_steps_per_second`.
    """
    runtime = time.time() - start_time
    result = {f"{split}_runtime": round(runtime, 4)}
    # Fix: guard against ZeroDivisionError when the measured span is shorter
    # than the clock resolution.
    if runtime == 0:
        return result
    if num_samples is not None:
        samples_per_second = num_samples / runtime
        result[f"{split}_samples_per_second"] = round(samples_per_second, 3)
    if num_steps is not None:
        steps_per_second = num_steps / runtime
        result[f"{split}_steps_per_second"] = round(steps_per_second, 3)
    return result
class SchedulerType(ExplicitEnum):
    """Supported learning-rate scheduler shapes."""

    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
class TrainerMemoryTracker:
    """
    A helper class that tracks cpu and gpu memory.
    This class will silently skip unless `psutil` is available. Install with `pip install psutil`.
    When a stage completes, it can pass metrics dict to update with the memory metrics gathered during this stage.
    Example :
    ```python
    self._memory_tracker = TrainerMemoryTracker(self.args.skip_memory_metrics)
    self._memory_tracker.start()
    # code ...
    metrics = {"train_runtime": 10.5}
    self._memory_tracker.stop_and_update_metrics(metrics)
    ```
    At the moment GPU tracking is only for `pytorch`, but can be extended to support `tensorflow`.
    To understand this class' intricacies please read the documentation of [`~Trainer.log_metrics`].
    """

    # map trainer methods to metrics prefix
    stages = {
        "__init__": "init",
        "train": "train",
        "_inner_training_loop": "train",
        "evaluate": "eval",
        "predict": "test",
    }

    def __init__(self, skip_memory_metrics=False):
        # When True every public method returns immediately (no-op tracker).
        self.skip_memory_metrics = skip_memory_metrics
        if not is_psutil_available():
            # soft dependency on psutil
            self.skip_memory_metrics = True
        if self.skip_memory_metrics:
            return
        import psutil  # noqa

        if is_torch_cuda_available():
            import torch

            self.torch = torch
            # per-stage gpu memory snapshots, keyed by stage name
            self.gpu = {}
        else:
            # self.torch is None means "no gpu tracking" throughout this class
            self.torch = None
        self.process = psutil.Process()
        self.cur_stage = None
        # per-stage cpu memory snapshots, keyed by stage name
        self.cpu = {}
        self.init_reported = False

    def derive_stage(self):
        """derives the stage/caller name automatically"""
        # Two frames up: the trainer method that invoked start()/stop_and_update_metrics().
        caller = inspect.currentframe().f_back.f_back.f_code.co_name
        if caller in self.stages:
            return self.stages[caller]
        else:
            raise ValueError(
                f"was called from {caller}, but only expect to be called from one of {self.stages.keys()}"
            )

    def cpu_mem_used(self):
        """get resident set size memory for the current process"""
        return self.process.memory_info().rss

    def peak_monitor_func(self):
        """Busy-loop (runs in a daemon thread) sampling RSS until stop() clears
        `self.peak_monitoring`."""
        self.cpu_mem_used_peak = -1
        while True:
            self.cpu_mem_used_peak = max(self.cpu_mem_used(), self.cpu_mem_used_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            # time.sleep(0.001)  # 1msec
            if not self.peak_monitoring:
                break

    def start(self):
        """start tracking for the caller's stage"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        self.cur_stage = stage
        # Collect garbage first so the baseline reflects live objects only.
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.reset_peak_memory_stats()
            self.torch.cuda.empty_cache()
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_at_start = self.torch.cuda.memory_allocated()
        # cpu
        self.cpu_mem_used_at_start = self.cpu_mem_used()
        self.peak_monitoring = True
        peak_monitor_thread = threading.Thread(target=self.peak_monitor_func)
        peak_monitor_thread.daemon = True
        peak_monitor_thread.start()

    def stop(self, stage):
        """stop tracking for the passed stage"""
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # this sends a signal to peak_monitor_func to complete its loop
        self.peak_monitoring = False
        # first ensure all objects get collected and their memory is freed
        gc.collect()
        if self.torch is not None:
            self.torch.cuda.empty_cache()
        # concepts:
        # - alloc_delta:  the difference of allocated memory between the end and the start
        # - peaked_delta: the difference between the peak memory and the current memory
        # in order to know how much memory the measured code consumed one needs to sum these two
        # gpu
        if self.torch is not None:
            self.gpu_mem_used_now = self.torch.cuda.memory_allocated()
            self.gpu_mem_used_peak = self.torch.cuda.max_memory_allocated()
            self.gpu[self.cur_stage] = dict(
                begin=self.gpu_mem_used_at_start,
                end=self.gpu_mem_used_now,
                alloc=(self.gpu_mem_used_now - self.gpu_mem_used_at_start),
                peaked=max(0, self.gpu_mem_used_peak - self.gpu_mem_used_now),
            )
        # cpu
        self.cpu_mem_used_now = self.cpu_mem_used()
        self.cpu[self.cur_stage] = dict(
            begin=self.cpu_mem_used_at_start,
            end=self.cpu_mem_used_now,
            alloc=(self.cpu_mem_used_now - self.cpu_mem_used_at_start),
            peaked=max(0, self.cpu_mem_used_peak - self.cpu_mem_used_now),
        )
        # reset - cycle finished
        self.cur_stage = None

    def update_metrics(self, stage, metrics):
        """updates the metrics"""
        if self.skip_memory_metrics:
            return
        # deal with nested calls of eval during train - simply ignore those
        if self.cur_stage is not None and self.cur_stage != stage:
            return
        # since we don't have a way to return init metrics, we push them into the first of train/val/predict
        stages = [stage]
        if not self.init_reported:
            stages.insert(0, "init")
            self.init_reported = True
        # NOTE: the loop variable deliberately shadows the `stage` parameter.
        for stage in stages:
            for t in ["alloc", "peaked"]:
                if stage in self.cpu and t in self.cpu[stage]:
                    metrics[f"{stage}_mem_cpu_{t}_delta"] = self.cpu[stage][t]
                if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
                    metrics[f"{stage}_mem_gpu_{t}_delta"] = self.gpu[stage][t]
            # if we need additional debug info, enable the following
            # for t in ["begin", "end"]:
            #     if stage in self.cpu and t in self.cpu[stage]:
            #         metrics[f"{stage}_mem_cpu_{t}"] = self.cpu[stage][t]
            #     if self.torch is not None and stage in self.gpu and t in self.gpu[stage]:
            #         metrics[f"{stage}_mem_gpu_{t}"] = self.gpu[stage][t]
        # since memory can be allocated before init, and it might be difficult to track overall
        # memory usage, in particular for GPU, let's report memory usage at the point init was called
        if stages[0] == "init":
            metrics["before_init_mem_cpu"] = self.cpu["init"]["begin"]
            if self.torch is not None:
                metrics["before_init_mem_gpu"] = self.gpu["init"]["begin"]
            # if we also wanted to report any additional memory allocations in between init and
            # whatever the next stage was we could also report this:
            # if self.cpu["init"]["end"] != self.cpu[stage]["begin"]:
            #     metrics[f"after_init_mem_cpu_delta"] = self.cpu[stage]["begin"] - self.cpu["init"]["end"]
            # if self.torch is not None and self.gpu["init"]["end"] != self.gpu[stage]["begin"]:
            #     metrics[f"after_init_mem_gpu_delta"] = self.gpu[stage]["begin"] - self.gpu["init"]["end"]

    def stop_and_update_metrics(self, metrics=None):
        """combine stop and metrics update in one call for simpler code"""
        if self.skip_memory_metrics:
            return
        stage = self.derive_stage()
        self.stop(stage)
        # init doesn't have metrics to update so we just save that data for later stages to retrieve
        if metrics is not None:
            self.update_metrics(stage, metrics)
def has_length(dataset):
    """
    Checks if the dataset implements __len__() and it doesn't raise an error
    """
    try:
        length = len(dataset)
    except TypeError:
        # TypeError: len() of unsized object
        return False
    return length is not None
def denumpify_detensorize(metrics):
    """Recursively calls `.item()` on the element of the dictionary passed"""
    # Containers: rebuild the same container type with converted elements.
    if isinstance(metrics, (list, tuple)):
        return type(metrics)(denumpify_detensorize(m) for m in metrics)
    if isinstance(metrics, dict):
        return type(metrics)({k: denumpify_detensorize(v) for k, v in metrics.items()})
    # Leaves: unwrap numpy scalars and single-element torch tensors.
    if isinstance(metrics, np.generic):
        return metrics.item()
    if is_torch_available() and isinstance(metrics, torch.Tensor) and metrics.numel() == 1:
        return metrics.item()
    return metrics
def number_of_arguments(func):
    """
    Return the number of arguments of the passed function, even if it's a partial function.
    """
    if not isinstance(func, functools.partial):
        return len(inspect.signature(func).parameters)
    # For a partial: count the wrapped function's parameters, minus those
    # already bound positionally or by keyword.
    bound = len(func.args) + len(func.keywords)
    return len(inspect.signature(func.func).parameters) - bound
class ShardedDDPOption(ExplicitEnum):
    """Options controlling sharded data-parallel training."""

    SIMPLE = "simple"
    ZERO_DP_2 = "zero_dp_2"
    ZERO_DP_3 = "zero_dp_3"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
def find_executable_batch_size(
    function: callable = None, starting_batch_size: int = 128, auto_find_batch_size: bool = False
):
    """
    A basic decorator that will try to execute `function`. If it fails from exceptions related to out-of-memory or
    CUDNN, the batch size is cut in half and passed to `function`. `function` must take in a `batch_size` parameter as
    its first argument.

    Args:
        function (`callable`, *optional*):
            A function to wrap
        starting_batch_size (`int`, *optional*):
            The batch size to try and fit into memory
        auto_find_batch_size (`bool`, *optional*):
            If False, will just execute `function`
    """
    # Called without `function`: behave as a decorator factory.
    if function is None:
        return functools.partial(
            find_executable_batch_size,
            starting_batch_size=starting_batch_size,
            auto_find_batch_size=auto_find_batch_size,
        )
    if auto_find_batch_size:
        # Delegate the halving retry loop to accelerate.
        requires_backends(find_executable_batch_size, "accelerate")
        import accelerate.memory_utils as mem_utils

        return mem_utils.find_executable_batch_size(function=function, starting_batch_size=starting_batch_size)
    return functools.partial(function, batch_size=starting_batch_size)
class FSDPOption(ExplicitEnum):
    """Options for Fully Sharded Data Parallel training."""

    FULL_SHARD = "full_shard"
    SHARD_GRAD_OP = "shard_grad_op"
    OFFLOAD = "offload"
    AUTO_WRAP = "auto_wrap"
class RemoveColumnsCollator:
    """Wrap the data collator to remove unused columns before they are passed to the collator."""

    def __init__(
        self,
        data_collator,
        signature_columns,
        logger=None,
        model_name: Optional[str] = None,
        description: Optional[str] = None,
    ):
        self.data_collator = data_collator
        self.signature_columns = signature_columns
        self.logger = logger
        self.description = description
        self.model_name = model_name
        # Ensures the "ignored columns" message is logged at most once.
        self.message_logged = False

    def _log_ignored_columns(self, feature: dict) -> None:
        """Emit a one-time info message listing columns the model will not receive."""
        ignored_columns = list(set(feature.keys()) - set(self.signature_columns))
        if len(ignored_columns) > 0:
            dset_description = "" if self.description is None else f"in the {self.description} set"
            self.logger.info(
                f"The following columns {dset_description} don't have a corresponding argument in "
                f"`{self.model_name}.forward` and have been ignored: {', '.join(ignored_columns)}."
                f" If {', '.join(ignored_columns)} are not expected by `{self.model_name}.forward`, "
                " you can safely ignore this message."
            )
            self.message_logged = True

    def _remove_columns(self, feature: dict) -> dict:
        if not self.message_logged and self.logger and self.model_name:
            self._log_ignored_columns(feature)
        return {key: value for key, value in feature.items() if key in self.signature_columns}

    def __call__(self, features: List[dict]):
        return self.data_collator([self._remove_columns(feature) for feature in features])
|
race_condition_2_threads.py | import threading
from time import sleep
from random import random
# Shared counter that both worker threads increment.
counter = 0

# Sleep a random duration below 0.1 s to widen the race window.
def randsleep(): return sleep(0.1 * random())
# Racy version: the read of `counter` and the write-back are separated by a
# sleep, so two threads can read the same value and lose increments.
# (This definition is intentionally shadowed by the locked version below.)
def incr(n):
    global counter
    for count in range(n):
        current = counter  # read
        randsleep()
        counter = current + 1  # write-back may clobber another thread's update
        randsleep()
# fix race condition with locking the counter variable
incr_lock = threading.Lock()

# Locked version: the whole read-modify-write sequence happens while holding
# `incr_lock`, so no increment can be lost.
def incr(n):
    global counter
    for count in range(n):
        with incr_lock:
            current = counter
            randsleep()
            counter = current + 1
        randsleep()
# Run two threads of n increments each; with the lock in place the final
# counter is deterministically 2 * n.
n = 7
t1 = threading.Thread(target=incr, args=(n, ))
t2 = threading.Thread(target=incr, args=(n, ))
t1.start()
t2.start()
t1.join()
t2.join()
print(f'Counter: {counter}')
|
ssl_analyzer.py | #!/usr/bin/env python3
import socket
import sys
import json
from datetime import datetime
from ssl import PROTOCOL_TLSv1
from time import sleep
from ocspchecker import ocspchecker
from conf_reader import ReadConfig
from crl_check import check_crl, CRLStatus
from db import get_connection, insert_data, close_connection
try:
from OpenSSL import SSL, crypto
from json2html import *
except ImportError:
print('Please install required modules: pip install -r requirements.txt')
sys.exit(1)
class Clr:
    """Text colors."""

    # ANSI escape codes for terminal foreground colors.
    RST = '\033[39m'  # reset foreground to default
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
# Startup marker emitted when the module is loaded.
print('ssl_analyzer_start')
class VerifyCallback:
    """Records the details of the most recent OpenSSL certificate verification
    callback so they can be inspected after the handshake."""

    def __init__(self):
        # Populated by callback() during the TLS handshake.
        self.connection = None
        self.err_no = 0
        self.depth = None
        self.result = None

    def callback(self, connection, cert, err_no, depth, result):
        # Store the verification outcome and pass `result` through to OpenSSL
        # unchanged, so verification behavior is not altered.
        self.connection = connection
        self.err_no = err_no
        self.depth = depth
        self.result = result
        return result
class SSLChecker:
    """Collects SSL/TLS certificate, OCSP and CRL details for a list of hosts
    and stores one row per host in a database."""

    # Running tallies across all hosts checked by this instance.
    total_valid = 0
    total_expired = 0
    total_failed = 0
    total_warning = 0

    def __init__(self):
        # CA bundle used to verify peer certificates.
        self.cafile = "./data/cacert.pem"
        self.verify = VerifyCallback()
        # Column order expected by the database insert.
        self.table_keys = ['host', 'open443', 'error', 'ssl_error', 'cert_ver', 'cert_alg', 'issuer_c', 'issuer_o',
                           'pub_key_type', 'pub_key_bits', 'cert_exp', 'valid_from', 'valid_till', 'validity_days',
                           'days_left', 'ocsp_status', 'ocsp_error', 'crl_status', 'crl_reason']
        # db conn
        self.db_connection = get_connection()

    def get_cert(self, host, port):
        """Connection to the host."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        ssl_context = SSL.Context(PROTOCOL_TLSv1)
        ssl_context.load_verify_locations(self.cafile)
        # Route verification results through self.verify so err_no is captured.
        ssl_context.set_verify(SSL.VERIFY_PEER, self.verify.callback)
        sock.connect((host, int(port)))
        ssl_connection = SSL.Connection(ssl_context, sock)
        # SNI, so name-based virtual hosts present the right certificate.
        ssl_connection.set_tlsext_host_name(host.encode())
        ssl_connection.set_connect_state()
        ssl_connection.do_handshake()
        cert = ssl_connection.get_peer_certificate()
        sock.close()
        return cert

    def analyze_ssl(self, host, context):
        """Analyze the security of the SSL certificate."""
        from urllib.request import urlopen
        api_url = 'https://api.ssllabs.com/api/v3/'
        # Poll the SSL Labs API until the assessment is ready.
        while True:
            main_request = json.loads(urlopen(api_url + 'analyze?host={}'.format(host)).read().decode('utf-8'))
            if main_request['status'] in ('DNS', 'IN_PROGRESS'):
                sleep(5)
                continue
            elif main_request['status'] == 'READY':
                break
        endpoint_data = json.loads(urlopen(api_url + 'getEndpointData?host={}&s={}'.format(
            host, main_request['endpoints'][0]['ipAddress'])).read().decode('utf-8'))
        # if the certificate is invalid
        if endpoint_data['statusMessage'] == 'Certificate not valid for domain name':
            return context
        context[host]['grade'] = main_request['endpoints'][0]['grade']
        context[host]['poodle_vuln'] = endpoint_data['details']['poodle']
        context[host]['heartbleed_vuln'] = endpoint_data['details']['heartbleed']
        context[host]['heartbeat_vuln'] = endpoint_data['details']['heartbeat']
        context[host]['freak_vuln'] = endpoint_data['details']['freak']
        context[host]['logjam_vuln'] = endpoint_data['details']['logjam']
        context[host]['drownVulnerable'] = endpoint_data['details']['drownVulnerable']
        return context

    def get_cert_sans(self, x509cert):
        """
        Get Subject Alt Names from Certificate. Shameless taken from stack overflow:
        https://stackoverflow.com/users/4547691/anatolii-chmykhalo
        """
        san = ''
        ext_count = x509cert.get_extension_count()
        for i in range(0, ext_count):
            ext = x509cert.get_extension(i)
            if 'subjectAltName' in str(ext.get_short_name()):
                san = ext.__str__()
        # replace commas to not break csv output
        san = san.replace(',', ';')
        return san

    def get_cert_info(self, host, context, cert):
        """Get all the information about cert and create a JSON file."""
        context['cert_ver'] = cert.get_version()  # Version Number v1/v2/v3
        context['cert_sn'] = str(cert.get_serial_number())  # Serial Number
        context['cert_alg'] = cert.get_signature_algorithm().decode()  # Signature Algorithm
        # Issuer Name C=country name;O=OrganizationName;CN=common name
        cert_issuer = cert.get_issuer()
        context['issuer_c'] = cert_issuer.countryName
        context['issuer_o'] = cert_issuer.organizationName
        context['issuer_ou'] = cert_issuer.organizationalUnitName
        context['issuer_cn'] = cert_issuer.commonName
        # Subject Name
        cert_subject = cert.get_subject()
        context['issued_to'] = cert_subject.CN
        context['issued_o'] = cert_subject.O
        context['cert_sha1'] = cert.digest('sha1').decode()
        # context['cert_sans'] = self.get_cert_sans(cert)  # X509v3 Subject Alternative Name in Extensions
        pub = cert.get_pubkey()
        context['pub_key_type'] = pub.type()
        context['pub_key_bits'] = pub.bits()
        context['cert_exp'] = cert.has_expired()
        context['cert_valid'] = False if cert.has_expired() else True
        # Valid period (ASN.1 GeneralizedTime, e.g. 20240101000000Z).
        valid_from = datetime.strptime(cert.get_notBefore().decode('ascii'), '%Y%m%d%H%M%SZ')
        context['valid_from'] = valid_from.strftime('%Y-%m-%d')
        valid_till = datetime.strptime(cert.get_notAfter().decode('ascii'), '%Y%m%d%H%M%SZ')
        context['valid_till'] = valid_till.strftime('%Y-%m-%d')
        # Validity days
        context['validity_days'] = (valid_till - valid_from).days
        # Validity in days from now
        now = datetime.now()
        context['days_left'] = (valid_till - now).days
        # Valid days left
        context['valid_days_to_expire'] = (datetime.strptime(context['valid_till'], '%Y-%m-%d') - datetime.now()).days
        if cert.has_expired():
            self.total_expired += 1
        else:
            self.total_valid += 1
        # If the certificate has less than 15 days validity
        if context['valid_days_to_expire'] <= 15:
            self.total_warning += 1
        # OCSP: a 2-element result carries an error, a 3-element result a status.
        status = ocspchecker.get_ocsp_status(host)
        status_len = len(status)
        if status_len == 2:
            context['ocsp_error'] = status[1]
        elif status_len == 3:
            context['ocsp_status'] = status[2].split(": ")[1]
        # since crl check is time-consuming, we just check it when ocsp fail or ocsp get revoked status
        crl_status = check_crl(crypto.dump_certificate(crypto.FILETYPE_PEM, cert))
        context['crl_status'] = str(crl_status[0])
        if crl_status[0] is not CRLStatus.GOOD:
            tmp_str = crl_status[1]
            context['crl_reason'] = tmp_str[:250]  # limit the string length to avoid database error
        return context

    def print_status(self, context, host):
        """Print all the useful info about host."""
        print('\t{}[+]{} {}\n\t{}'.format(Clr.GREEN, Clr.RST, host, '-' * (len(host) + 5)))
        for key, value in context[host].items():
            print('\t\t', key, ': ', value)
        print('\n')

    def get_status_list(self, host, context):
        """
        obtain detail ssl info
        :param host: host to check
        :param context: raw res
        :return: list
        """
        # Values ordered to match self.table_keys (the DB column order).
        ret = []
        for key in self.table_keys:
            ret.append(context[host][key])
        return ret

    def show_result(self, args):
        """Get the context."""
        context = {}
        hosts = args['hosts']
        for host in hosts:
            # Start every host from a row of 'null' placeholders.
            sub_context = dict.fromkeys(self.table_keys, 'null')
            sub_context['host'] = host
            try:
                # check if 443 port open
                port = 443
                is_open, update_host = self.check_port_open(host, port)
                if not is_open:
                    sub_context['open443'] = False  # it means the host did not open 443 port
                else:
                    sub_context['open443'] = True
                    if update_host:
                        host = 'www.' + host
                        sub_context['host'] = host
                # even port not open, still try to get cert
                cert = self.get_cert(host, port)
                self.get_cert_info(host, sub_context, cert)
            # except SSL.SysCallError:
            #     sub_context['error'] = 'Failed: Misconfiguration SSL/TLS'
            except Exception as error:
                # Record the failure in the row instead of aborting the batch.
                sub_context['error'] = str(error)
                print('\t{}[-]{} {:<20s} Failed: {}\n'.format(Clr.RED, Clr.RST, host, error))
            except KeyboardInterrupt:
                # Reachable: KeyboardInterrupt derives from BaseException, not Exception.
                print('{}Canceling script...{}\n'.format(Clr.YELLOW, Clr.RST))
                sys.exit(1)
            sub_context['ssl_error'] = str(self.verify.err_no)
            context[host] = sub_context
            self.print_status(context, host)
            # insert data to database
            insert_list = self.get_status_list(host, context)
            insert_data(self.db_connection, insert_list)
        close_connection(self.db_connection)

    def check_port_open(self, host, port):
        """Return (is_open, should_update_host): whether port `port` is reachable
        on `host`, retrying with a 'www.' prefix on failure."""
        is_open = True
        is_success = True
        should_update_host = False
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            try:
                conn = sock.connect_ex((host, port))
                if conn != 0:
                    is_success = False
            except Exception as err:
                is_success = False
                pass
            if not is_success:
                # Retry with the www. prefix before declaring the port closed.
                try:
                    host = 'www.' + host
                    conn = sock.connect_ex((host, port))
                    if conn != 0:
                        is_open = False
                    else:
                        should_update_host = True
                except Exception as err:
                    raise err
        return is_open, should_update_host
def csv_reader(f_name, thread_num=1, total_num=120000):
    """
    Read the hosts CSV and split its second column into per-thread chunks.

    :param thread_num: multi-thread numbers (number of output chunks)
    :param total_num: nums want to analyze; 0 means the whole file
    :param f_name: file name; each row is expected as `rank,domain`
    :return: domain list [[...], [...], [...], ...]
    :raises StopIteration: when the file holds fewer than `total_num` rows
    """
    import csv
    print('start to read csv.')
    ret = []
    if total_num == 0:
        # Fix: count rows with a context manager instead of leaking the handle
        # opened by `open(f_name).readlines()`.
        with open(f_name) as fh:
            total_num = sum(1 for _ in fh)
    print('total number of hosts to analyze: ', total_num)
    sites_count = total_num / thread_num
    left = total_num % thread_num
    # Fix: keep the CSV handle in a `with` block so it is always closed.
    with open(f_name, 'r') as fh:
        f = csv.reader(fh)
        for no in range(thread_num):
            temp = []
            if no == 0:
                # The first chunk also absorbs the remainder rows.
                for j in range(int(left)):
                    line = next(f)
                    temp.append(line[1])
            for i in range(int(sites_count)):
                line = next(f)
                temp.append(line[1])
            ret.append(temp)
    return ret
def checker_with_multi_thread(hosts):
    """
    multi thread analyze: one SSLChecker (and one worker thread) per host chunk.
    :param hosts: [[]] list of host lists, one per thread
    :return:
    """
    import threading
    for item in hosts:
        checker = SSLChecker()
        t = threading.Thread(target=checker.show_result, args=({'hosts': item},))
        # Fix: `Thread.setDaemon()` is deprecated since Python 3.10 — assign the
        # attribute directly. Non-daemon threads keep the process alive until
        # every checker finishes.
        t.daemon = False
        t.start()
def checker_without_multi_thread(hosts):
    """
    single thread analyze
    test: hosts': ['hexun.com', 'expired.badssl.com', 'revoked.badssl.com', 'google.com']
    :param hosts: [[]]
    :return:
    """
    # Only the first chunk is analyzed in single-threaded mode.
    SSLChecker().show_result({'hosts': hosts[0]})
if __name__ == '__main__':
    # Read runtime options and the host list, then run either the threaded or
    # the single-threaded checker.
    config = ReadConfig()
    use_threads = config.get_multi_thread_opt()
    host_list = csv_reader(config.get_csv_path(),
                           thread_num=config.get_thread_num(),
                           total_num=config.get_analyze_nums())
    if use_threads:
        checker_with_multi_thread(host_list)
    else:
        checker_without_multi_thread(host_list)
|
test_780_tailscale.py | import os
import re
import socket
import sys
from threading import Thread
import pytest
from .md_conf import MDConf
class TailscaleFaker:
    """Minimal fake of the tailscale local API: serves certificate/key lookups
    over a unix domain socket from a background thread."""

    def __init__(self, env, path):
        self.env = env
        self._uds_path = path
        self._done = False

    def start(self):
        def process(self):
            self._socket.listen(1)
            self._process()
        # Remove a stale socket file from a previous run, if any.
        try:
            os.unlink(self._uds_path)
        except OSError:
            # Only ignore "file did not exist"; re-raise real unlink failures.
            if os.path.exists(self._uds_path):
                raise
        self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._socket.bind(self._uds_path)
        self._thread = Thread(target=process, daemon=True, args=[self])
        self._thread.start()

    def stop(self):
        # Closing the socket makes accept() raise, ending _process().
        self._done = True
        self._socket.close()

    def send_error(self, c, status, reason):
        # NOTE: continuation lines inside the literal must stay at column 0 —
        # they are part of the HTTP response payload.
        c.sendall(f"""HTTP/1.1 {status} {reason}\r
Server: TailscaleFaker\r
Content-Length: 0\r
Connection: close\r
\r
""".encode())

    def send_data(self, c, ctype: str, data: bytes):
        c.sendall(f"""HTTP/1.1 200 OK\r
Server: TailscaleFaker\r
Content-Type: {ctype}\r
Content-Length: {len(data)}\r
Connection: close\r
\r
""".encode() + data)

    def _process(self):
        # a http server written on a sunny afternooon
        while self._done is False:
            try:
                c, client_address = self._socket.accept()
                try:
                    data = c.recv(1024)
                    lines = data.decode().splitlines()
                    # Parse the request line, e.g. "GET /localapi/... HTTP/1.1".
                    m = re.match(r'^(?P<method>\w+)\s+(?P<uri>\S+)\s+HTTP/1.1', lines[0])
                    if m is None:
                        self.send_error(c, 400, "Bad Request")
                        continue
                    uri = m.group('uri')
                    # Only the cert lookup endpoint of the local API is faked.
                    m = re.match(r'/localapi/v0/cert/(?P<domain>\S+)\?type=(?P<type>\w+)', uri)
                    if m is None:
                        self.send_error(c, 404, "Not Found")
                        continue
                    domain = m.group('domain')
                    cred_type = m.group('type')
                    creds = self.env.get_credentials_for_name(domain)
                    sys.stderr.write(f"lookup domain={domain}, type={cred_type} -> {creds}\n")
                    if creds is None or len(creds) == 0:
                        self.send_error(c, 404, "Not Found")
                        continue
                    if cred_type == 'crt':
                        self.send_data(c, "text/plain", creds[0].cert_pem)
                        pass
                    elif cred_type == 'key':
                        self.send_data(c, "text/plain", creds[0].pkey_pem)
                    else:
                        self.send_error(c, 404, "Not Found")
                        continue
                finally:
                    c.close()
            except ConnectionAbortedError:
                # stop() closed the listening socket; shut down cleanly.
                self._done = True
class TestTailscale:

    @pytest.fixture(autouse=True, scope='class')
    def _class_scope(self, env, acme):
        # One faker instance serves the whole test class via a unix socket;
        # the path is stashed on the class for the test methods to use.
        UDS_PATH = f"{env.gen_dir}/tailscale.sock"
        TestTailscale.UDS_PATH = UDS_PATH
        faker = TailscaleFaker(env=env, path=UDS_PATH)
        faker.start()
        env.APACHE_CONF_SRC = "data/test_auto"
        acme.start(config='default')
        env.clear_store()
        MDConf(env).install()
        assert env.apache_restart() == 0
        yield
        faker.stop()
    @pytest.fixture(autouse=True, scope='function')
    def _method_scope(self, env, request):
        # Fresh store and a per-test domain name before every test method.
        env.clear_store()
        self.test_domain = env.get_request_domain(request)
def _write_res_file(self, doc_root, name, content):
if not os.path.exists(doc_root):
os.makedirs(doc_root)
open(os.path.join(doc_root, name), "w").write(content)
    # create a MD using `tailscale` as protocol, wrong path
    def test_md_780_001(self, env):
        domain = env.tailscale_domain
        # generate config with one MD
        domains = [domain]
        # deliberately bogus socket path — tailscale is not listening here
        socket_path = '/xxx'
        conf = MDConf(env, admin="admin@" + domain)
        conf.start_md(domains)
        conf.add([
            "MDCertificateProtocol tailscale",
            f"MDCertificateAuthority file://{socket_path}",
        ])
        conf.end_md()
        conf.add_vhost(domains)
        conf.install()
        # restart and watch it fail due to wrong tailscale unix socket path
        assert env.apache_restart() == 0
        md = env.await_error(domain)
        assert md
        assert md['renewal']['errors'] > 0
        assert md['renewal']['last']['status-description'] == 'No such file or directory'
        assert md['renewal']['last']['detail'] == \
            f"tailscale socket not available, may not be up: {socket_path}"
# create a MD using `tailscale` as protocol, path to faker, should succeed
def test_md_780_002(self, env):
domain = env.tailscale_domain
# generate config with one MD
domains = [domain]
socket_path = '/xxx'
conf = MDConf(env, admin="admin@" + domain)
conf.start_md(domains)
conf.add([
"MDCertificateProtocol tailscale",
f"MDCertificateAuthority file://{self.UDS_PATH}",
])
conf.end_md()
conf.add_vhost(domains)
conf.install()
# restart and watch it fail due to wrong tailscale unix socket path
assert env.apache_restart() == 0
assert env.await_completion(domains)
assert env.apache_restart() == 0
env.check_md_complete(domain)
# create a MD using `tailscale` as protocol, but domain name not assigned by tailscale
def test_md_780_003(self, env):
domain = "test.not-correct.ts.net"
# generate config with one MD
domains = [domain]
socket_path = '/xxx'
conf = MDConf(env, admin="admin@" + domain)
conf.start_md(domains)
conf.add([
"MDCertificateProtocol tailscale",
f"MDCertificateAuthority file://{self.UDS_PATH}",
])
conf.end_md()
conf.add_vhost(domains)
conf.install()
# restart and watch it fail due to wrong tailscale unix socket path
assert env.apache_restart() == 0
md = env.await_error(domain)
assert md
assert md['renewal']['errors'] > 0
assert md['renewal']['last']['status-description'] == 'No such file or directory'
assert md['renewal']['last']['detail'] == "retrieving certificate from tailscale"
|
test_webui.py | #!/usr/bin/env python
# coding=UTF-8
import os
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
import time
import unittest
class BasicWebUiTest(unittest.TestCase):
    """Drive the REDbot web UI end-to-end with a headless browser.

    NOTE(review): relies on the module-level ``redbot_uri`` global that is
    only defined in the ``__main__`` block below — importing this class
    from elsewhere would raise NameError in setUp. PhantomJS is also a
    deprecated selenium driver; confirm the pinned selenium version.
    """
    # URI typed into the UI's form field and submitted for checking.
    test_uri = "http://www.mnot.net/"

    def setUp(self):
        # Open the locally-running REDbot instance, submit test_uri, and
        # wait for the result page to render.
        self.browser = webdriver.PhantomJS()
        self.browser.get(redbot_uri)
        self.uri = self.browser.find_element_by_id("uri")
        self.uri.send_keys(self.test_uri)
        self.uri.submit()
        time.sleep(1.0)
        self.check_complete()

    def test_multi(self):
        # Click the "check assets" link (accesskey 'a') on the result page.
        check = self.browser.find_element_by_css_selector('a[accesskey="a"]')
        check.click()
        time.sleep(0.5)

    def check_complete(self):
        # The footer div only exists once the page finished rendering.
        try:
            self.browser.find_element_by_css_selector("div.footer")
        except NoSuchElementException:
            raise Exception("Page not complete.")
        # Screenshot kept for debugging failed runs.
        self.browser.save_screenshot('dump.png')

    def tearDown(self):
        self.check_complete()
        self.browser.close()
class CnnWebUiTest(BasicWebUiTest):
    """Same UI walkthrough as BasicWebUiTest, pointed at CNN's homepage."""
    test_uri = 'http://edition.cnn.com/'
if __name__ == "__main__":
    # Spin up a local REDbot instance in a child process, run the Selenium
    # UI tests against it, then tear the server process down.
    test_host = "localhost"
    test_port = 8080
    redbot_uri = "http://%s:%s/" % (test_host, test_port)
    import sys
    sys.path.insert(0, "deploy")
    def redbot_run():
        # Child-process entry point: serve the web UI until terminated.
        import webui
        webui.standalone_main(test_host, test_port, "share")
    from multiprocessing import Process
    p = Process(target=redbot_run)
    p.start()
    # NOTE(review): no explicit wait for the server to come up before the
    # tests start — verify setUp's sleep is enough on slow machines.
    unittest.main(exit=False, verbosity=2)
    print("done webui test...")
    p.terminate()
|
test_datapipe.py | import http.server
import itertools
import os
import os.path
import pickle
import random
import socketserver
import sys
import tarfile
import tempfile
import threading
import time
import unittest
import warnings
import zipfile
from functools import partial
from typing import (
Any,
Awaitable,
Dict,
Generic,
Iterator,
List,
NamedTuple,
Optional,
Set,
Tuple,
Type,
TypeVar,
Union,
)
from unittest import skipIf
import numpy as np
import torch
import torch.nn as nn
import torch.utils.data.backward_compatibility
import torch.utils.data.datapipes as dp
import torch.utils.data.graph
import torch.utils.data.sharding
from torch.testing._internal.common_utils import TestCase, run_tests
from torch.utils.data import (
DataLoader,
DataChunk,
IterDataPipe,
MapDataPipe,
RandomSampler,
argument_validation,
runtime_validation,
runtime_validation_disabled,
)
from torch.utils.data.datapipes.utils.decoder import (
basichandlers as decoder_basichandlers,
)
try:
import torchvision.transforms
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = skipIf(not HAS_TORCHVISION, "no torchvision")
try:
import dill
# XXX: By default, dill writes the Pickler dispatch table to inject its
# own logic there. This globally affects the behavior of the standard library
# pickler for any user who transitively depends on this module!
# Undo this extension to avoid altering the behavior of the pickler globally.
dill.extend(use_dill=False)
HAS_DILL = True
except ImportError:
HAS_DILL = False
skipIfNoDill = skipIf(not HAS_DILL, "no dill")
# Covariant type parameter used by the typed-DataPipe annotations below.
T_co = TypeVar("T_co", covariant=True)
def create_temp_dir_and_files():
    """Create a temp dir (plus one sub-dir) populated with small fixture files.

    Returns a list of two tuples:
      [(top_dir_handle, txt_path, byte_path, empty_path),
       (sub_dir_handle, sub_txt_path, sub_byte_path)]
    The TemporaryDirectory handles are returned (not cleaned up here) so the
    caller's tearDown() controls their lifetime.  # noqa: P201 below for that.
    """
    def _reserve(where, suffix):
        # Create a uniquely-named, persistent file and hand back its path.
        with tempfile.NamedTemporaryFile(dir=where, delete=False, suffix=suffix) as handle:
            return handle.name

    top_dir = tempfile.TemporaryDirectory()  # noqa: P201
    txt_path = _reserve(top_dir.name, '.txt')
    byte_path = _reserve(top_dir.name, '.byte')
    empty_path = _reserve(top_dir.name, '.empty')
    with open(txt_path, 'w') as out:
        out.write('0123456789abcdef')
    with open(byte_path, 'wb') as out:
        out.write(b"0123456789abcdef")

    sub_dir = tempfile.TemporaryDirectory(dir=top_dir.name)  # noqa: P201
    sub_txt_path = _reserve(sub_dir.name, '.txt')
    sub_byte_path = _reserve(sub_dir.name, '.byte')
    with open(sub_txt_path, 'w') as out:
        out.write('0123456789abcdef')
    with open(sub_byte_path, 'wb') as out:
        out.write(b"0123456789abcdef")

    return [(top_dir, txt_path, byte_path, empty_path),
            (sub_dir, sub_txt_path, sub_byte_path)]
class TestDataChunk(TestCase):
    """Exercise the list-like behavior of DataChunk."""

    def setUp(self):
        # A shuffled permutation of 0..9 shared by every test method.
        self.elements = list(range(10))
        random.shuffle(self.elements)
        self.chunk: DataChunk[int] = DataChunk(self.elements)

    def test_getitem(self):
        for idx in range(10):
            self.assertEqual(self.elements[idx], self.chunk[idx])

    def test_iter(self):
        for expected, got in zip(self.elements, iter(self.chunk)):
            self.assertEqual(expected, got)

    def test_len(self):
        self.assertEqual(len(self.elements), len(self.chunk))

    def test_as_string(self):
        # str() of a chunk matches str() of the backing list, singly and in batches.
        self.assertEqual(str(self.chunk), str(self.elements))
        batch = [self.elements] * 3
        chunks: List[DataChunk[int]] = [DataChunk(self.elements)] * 3
        self.assertEqual(str(batch), str(chunks))

    def test_sort(self):
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.sort()
        self.assertTrue(isinstance(chunk, DataChunk))
        for expected, got in enumerate(chunk):
            self.assertEqual(expected, got)

    def test_reverse(self):
        chunk: DataChunk[int] = DataChunk(self.elements)
        chunk.reverse()
        self.assertTrue(isinstance(chunk, DataChunk))
        for idx in range(10):
            self.assertEqual(chunk[idx], self.elements[9 - idx])

    def test_random_shuffle(self):
        # Identically-seeded RNGs must shuffle chunk and list the same way.
        values = list(range(10))
        chunk: DataChunk[int] = DataChunk(values)
        rng = random.Random(0)
        rng.shuffle(chunk)
        rng = random.Random(0)
        rng.shuffle(values)
        self.assertEqual(chunk, values)
class TestIterableDataPipeBasic(TestCase):
    """Integration tests for the basic IterDataPipes (ListDirFiles,
    LoadFilesFromDisk, tar/zip readers, RoutedDecoder, GroupByKey,
    demux/mux) over real files in a temporary directory."""

    def setUp(self):
        # Fixture layout from create_temp_dir_and_files():
        # ret[0] -> (top dir handle, file paths...), ret[1] -> (sub dir, ...)
        ret = create_temp_dir_and_files()
        self.temp_dir = ret[0][0]
        self.temp_files = ret[0][1:]
        self.temp_sub_dir = ret[1][0]
        self.temp_sub_files = ret[1][1:]

    def tearDown(self):
        try:
            self.temp_sub_dir.cleanup()
            self.temp_dir.cleanup()
        except Exception as e:
            warnings.warn("TestIterableDatasetBasic was not able to cleanup temp dir due to {}".format(str(e)))

    def test_listdirfiles_iterable_datapipe(self):
        """Non-recursive listing yields only top-level files; recursive adds the sub-dir's."""
        temp_dir = self.temp_dir.name
        datapipe = dp.iter.ListDirFiles(temp_dir, '')
        count = 0
        for pathname in datapipe:
            count = count + 1
            self.assertTrue(pathname in self.temp_files)
        self.assertEqual(count, len(self.temp_files))
        count = 0
        datapipe = dp.iter.ListDirFiles(temp_dir, '', recursive=True)
        for pathname in datapipe:
            count = count + 1
            self.assertTrue((pathname in self.temp_files) or (pathname in self.temp_sub_files))
        self.assertEqual(count, len(self.temp_files) + len(self.temp_sub_files))

    def test_loadfilesfromdisk_iterable_datapipe(self):
        """LoadFilesFromDisk yields (path, stream) pairs matching on-disk bytes."""
        # test import datapipe class directly
        from torch.utils.data.datapipes.iter import (
            ListDirFiles,
            LoadFilesFromDisk,
        )
        temp_dir = self.temp_dir.name
        datapipe1 = ListDirFiles(temp_dir, '')
        datapipe2 = LoadFilesFromDisk(datapipe1)
        count = 0
        for rec in datapipe2:
            count = count + 1
            self.assertTrue(rec[0] in self.temp_files)
            with open(rec[0], 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        self.assertEqual(count, len(self.temp_files))

    # TODO(VitalyFedyunin): Generates unclosed buffer warning, need to investigate
    def test_readfilesfromtar_iterable_datapipe(self):
        """Members extracted from a gzipped tar match the source files, both
        while the archive is still being consumed and after re-listing."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            tar.add(self.temp_files[0])
            tar.add(self.temp_files[1])
            tar.add(self.temp_files[2])
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.tar')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromTar(datapipe2)
        # read extracted files before reaching the end of the tarfile
        for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
            self.assertTrue(rec is not None and temp_file is not None)
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        # read extracted files after reaching the end of the tarfile
        data_refs = list(datapipe3)
        self.assertEqual(len(data_refs), len(self.temp_files))
        for data_ref, temp_file in zip(data_refs, self.temp_files):
            self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(data_ref[1].read(), f.read())
            data_ref[1].close()

    # TODO(VitalyFedyunin): Generates unclosed buffer warning, need to investigate
    def test_readfilesfromzip_iterable_datapipe(self):
        """Same contract as the tar test, but for zip archives."""
        temp_dir = self.temp_dir.name
        temp_zipfile_pathname = os.path.join(temp_dir, "test_zip.zip")
        with zipfile.ZipFile(temp_zipfile_pathname, 'w') as myzip:
            myzip.write(self.temp_files[0])
            myzip.write(self.temp_files[1])
            myzip.write(self.temp_files[2])
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.zip')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromZip(datapipe2)
        # read extracted files before reaching the end of the zipfile
        for rec, temp_file in itertools.zip_longest(datapipe3, self.temp_files):
            self.assertTrue(rec is not None and temp_file is not None)
            self.assertEqual(os.path.basename(rec[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(rec[1].read(), f.read())
            rec[1].close()
        # read extracted files after reaching the end of the zipfile
        data_refs = list(datapipe3)
        self.assertEqual(len(data_refs), len(self.temp_files))
        for data_ref, temp_file in zip(data_refs, self.temp_files):
            self.assertEqual(os.path.basename(data_ref[0]), os.path.basename(temp_file))
            with open(temp_file, 'rb') as f:
                self.assertEqual(data_ref[1].read(), f.read())
            data_ref[1].close()

    def test_routeddecoder_iterable_datapipe(self):
        """RoutedDecoder dispatches by extension; handler registration order
        decides which decoder wins for .png."""
        temp_dir = self.temp_dir.name
        temp_pngfile_pathname = os.path.join(temp_dir, "test_png.png")
        # Not a real PNG: a .npy array saved under a .png name to exercise routing.
        png_data = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
        np.save(temp_pngfile_pathname, png_data)
        datapipe1 = dp.iter.ListDirFiles(temp_dir, ['*.png', '*.txt'])
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)

        def _png_decoder(extension, data):
            if extension != 'png':
                return None
            return np.load(data)

        def _helper(prior_dp, dp, channel_first=False):
            # Byte stream is not closed
            for inp in prior_dp:
                self.assertFalse(inp[1].closed)
            for inp, rec in zip(prior_dp, dp):
                ext = os.path.splitext(rec[0])[1]
                if ext == '.png':
                    expected = np.array([[[1., 0., 0.], [1., 0., 0.]], [[1., 0., 0.], [1., 0., 0.]]], dtype=np.single)
                    if channel_first:
                        expected = expected.transpose(2, 0, 1)
                    self.assertEqual(rec[1], expected)
                else:
                    with open(rec[0], 'rb') as f:
                        self.assertEqual(rec[1], f.read().decode('utf-8'))
                # Corresponding byte stream is closed by Decoder
                self.assertTrue(inp[1].closed)
        cached = list(datapipe2)
        datapipe3 = dp.iter.RoutedDecoder(cached, _png_decoder)
        datapipe3.add_handler(decoder_basichandlers)
        _helper(cached, datapipe3)
        cached = list(datapipe2)
        datapipe4 = dp.iter.RoutedDecoder(cached, decoder_basichandlers)
        datapipe4.add_handler(_png_decoder)
        _helper(cached, datapipe4, channel_first=True)

    # TODO(VitalyFedyunin): Generates unclosed buffer warning, need to investigate
    def test_groupbykey_iterable_datapipe(self):
        """GroupByKey pairs tar members two-by-two by shared basename stem."""
        temp_dir = self.temp_dir.name
        temp_tarfile_pathname = os.path.join(temp_dir, "test_tar.tar")
        file_list = [
            "a.png", "b.png", "c.json", "a.json", "c.png", "b.json", "d.png",
            "d.json", "e.png", "f.json", "g.png", "f.png", "g.json", "e.json",
            "h.txt", "h.json"]
        with tarfile.open(temp_tarfile_pathname, "w:gz") as tar:
            for file_name in file_list:
                file_pathname = os.path.join(temp_dir, file_name)
                with open(file_pathname, 'w') as f:
                    f.write('12345abcde')
                tar.add(file_pathname)
        datapipe1 = dp.iter.ListDirFiles(temp_dir, '*.tar')
        datapipe2 = dp.iter.LoadFilesFromDisk(datapipe1)
        datapipe3 = dp.iter.ReadFilesFromTar(datapipe2)
        datapipe4 = dp.iter.GroupByKey(datapipe3, group_size=2)
        expected_result = [("a.png", "a.json"), ("c.png", "c.json"), ("b.png", "b.json"), ("d.png", "d.json"), (
            "f.png", "f.json"), ("g.png", "g.json"), ("e.png", "e.json"), ("h.json", "h.txt")]
        count = 0
        for rec, expected in zip(datapipe4, expected_result):
            count = count + 1
            self.assertEqual(os.path.basename(rec[0][0]), expected[0])
            self.assertEqual(os.path.basename(rec[1][0]), expected[1])
            for i in [0, 1]:
                self.assertEqual(rec[i][1].read(), b'12345abcde')
                rec[i][1].close()
        self.assertEqual(count, 8)

    def test_demux_mux_datapipe(self):
        """demux splits by predicate; mux round-robins the splits back together."""
        # NOTE(review): NumbersDataset is defined elsewhere in this file.
        numbers = NumbersDataset(10)
        n1, n2 = numbers.demux(2, lambda x: x % 2)
        self.assertEqual([0, 2, 4, 6, 8], list(n1))
        self.assertEqual([1, 3, 5, 7, 9], list(n2))
        numbers = NumbersDataset(10)
        n1, n2, n3 = numbers.demux(3, lambda x: x % 3)
        n = n1.mux(n2, n3)
        self.assertEqual(list(range(10)), list(n))
class FileLoggerSimpleHTTPRequestHandler(http.server.SimpleHTTPRequestHandler):
    """SimpleHTTPRequestHandler variant that can mirror its access log into
    a file instead of stderr; with no logfile, log lines are dropped."""

    def __init__(self, *args, logfile=None, **kwargs):
        # Open the log sink before the base __init__, which already
        # services the request (and hence may call log_message()).
        self.__loggerHandle = None
        if logfile is not None:
            self.__loggerHandle = open(logfile, 'a+')
        super().__init__(*args, **kwargs)

    def log_message(self, format, *args):
        if self.__loggerHandle is None:
            return
        entry = "%s - - [%s] %s\n" % (self.address_string(),
                                      self.log_date_time_string(),
                                      format % args)
        self.__loggerHandle.write(entry)

    def finish(self):
        # Close the log file before the base class tears down the request.
        if self.__loggerHandle is not None:
            self.__loggerHandle.close()
        super().finish()
def setUpLocalServerInThread():
    """Start a local HTTP file server on an OS-assigned port in a thread.

    Returns:
        (server_thread, "host:port" address string, server). The caller owns
        shutdown via server.shutdown() followed by server_thread.join().
    """
    # The original wrapped everything in `try: ... except Exception: raise`,
    # which is a no-op; removed.
    Handler = partial(FileLoggerSimpleHTTPRequestHandler, logfile=None)
    socketserver.TCPServer.allow_reuse_address = True
    server = socketserver.TCPServer(("", 0), Handler)
    server_addr = "{host}:{port}".format(host=server.server_address[0], port=server.server_address[1])
    server_thread = threading.Thread(target=server.serve_forever)
    server_thread.start()
    # Wait a bit for the server to come up
    time.sleep(3)
    return (server_thread, server_addr, server)
def create_temp_files_for_serving(tmp_dir, file_count, file_size,
                                  file_url_template):
    """Write `file_count` random data files of `file_size` bytes each into
    `tmp_dir`, plus a `urls_list` file with one formatted URL per data file.

    Args:
        tmp_dir: destination directory (must already exist).
        file_count: number of data files to create.
        file_size: size of each data file in bytes.
        file_url_template: format string with a `{num}` placeholder, written
            verbatim per file (it should carry its own trailing newline).
    """
    furl_local_file = os.path.join(tmp_dir, "urls_list")
    write_chunk = 1024 * 1024 * 16  # cap single urandom/write calls at 16 MiB
    with open(furl_local_file, 'w') as fsum:
        for i in range(file_count):
            f = os.path.join(tmp_dir, "webfile_test_{num}.data".format(num=i))
            # Open the data file once (the original re-opened it for every
            # 16 MiB chunk inside the while loop). Keep append mode to
            # preserve the original's behavior on pre-existing files.
            with open(f, 'ab+') as fout:
                rmn_size = file_size
                while rmn_size > 0:
                    step = min(rmn_size, write_chunk)
                    fout.write(os.urandom(step))
                    rmn_size = rmn_size - step
            fsum.write(file_url_template.format(num=i))
class TestIterableDataPipeHttp(TestCase):
    """HttpReader/ToBytes datapipe tests against a throwaway local HTTP
    server serving randomly-generated files from a temp directory."""
    # Class-level handles to the shared local server (name-mangled privates).
    __server_thread: threading.Thread
    __server_addr: str
    __server: socketserver.TCPServer

    @classmethod
    def setUpClass(cls):
        try:
            (cls.__server_thread, cls.__server_addr,
             cls.__server) = setUpLocalServerInThread()
        except Exception as e:
            warnings.warn("TestIterableDataPipeHttp could\
not set up due to {0}".format(str(e)))

    @classmethod
    def tearDownClass(cls):
        try:
            cls.__server.shutdown()
            cls.__server_thread.join(timeout=15)
        except Exception as e:
            warnings.warn("TestIterableDataPipeHttp could\
not tear down (clean up temp directory or terminate\
local server) due to {0}".format(str(e)))

    def _http_test_base(self, test_file_size, test_file_count, timeout=None,
                        chunk=None):
        """Serve generated files over HTTP and verify HttpReader/ToBytes
        return the expected number of bytes per URL."""
        def _get_data_from_tuple_fn(data, *args, **kwargs):
            return data[args[0]]

        with tempfile.TemporaryDirectory(dir=os.getcwd()) as tmpdir:
            # create tmp dir and files for test
            base_tmp_dir = os.path.basename(os.path.normpath(tmpdir))
            # NOTE(review): the template yields a double slash in the path
            # ("/{tmp_dir}//webfile...") — servers tolerate it, but confirm
            # this is intentional.
            file_url_template = ("http://{server_addr}/{tmp_dir}/"
                                 "/webfile_test_{num}.data\n")\
                .format(server_addr=self.__server_addr, tmp_dir=base_tmp_dir,
                        num='{num}')
            create_temp_files_for_serving(tmpdir, test_file_count,
                                          test_file_size, file_url_template)
            datapipe_dir_f = dp.iter.ListDirFiles(tmpdir, '*_list')
            datapipe_f_lines = dp.iter.ReadLinesFromFile(datapipe_dir_f)
            # Each line record is (path, line); Map with arg (1,) keeps the URL.
            datapipe_line_url: IterDataPipe[str] = \
                dp.iter.Map(datapipe_f_lines, _get_data_from_tuple_fn, (1,))
            datapipe_http = dp.iter.HttpReader(datapipe_line_url,
                                               timeout=timeout)
            datapipe_tob = dp.iter.ToBytes(datapipe_http, chunk=chunk)
            for (url, data) in datapipe_tob:
                self.assertGreater(len(url), 0)
                self.assertRegex(url, r'^http://.+\d+.data$')
                if chunk is not None:
                    self.assertEqual(len(data), chunk)
                else:
                    self.assertEqual(len(data), test_file_size)

    @unittest.skip("Stress test on large amount of files skipped\
due to the CI timing constraint.")
    def test_stress_http_reader_iterable_datapipes(self):
        test_file_size = 10
        # STATS: It takes about 5 hours to stress test 16 * 1024 * 1024
        # files locally
        test_file_count = 1024
        self._http_test_base(test_file_size, test_file_count)

    @unittest.skip("Test on the very large file skipped\
due to the CI timing constraint.")
    def test_large_files_http_reader_iterable_datapipes(self):
        # STATS: It takes about 11 mins to test a large file of 64GB locally
        test_file_size = 1024 * 1024 * 128
        test_file_count = 1
        timeout = 30
        chunk = 1024 * 1024 * 8
        self._http_test_base(test_file_size, test_file_count, timeout=timeout,
                             chunk=chunk)
class IDP_NoLen(IterDataPipe):
    """IterDataPipe wrapper that deliberately exposes no __len__."""

    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp

    def __iter__(self):
        yield from self.input_dp
class IDP(IterDataPipe):
    """IterDataPipe wrapper around a sized iterable, reporting its length."""

    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp
        # Snapshot the length at construction time.
        self.length = len(input_dp)

    def __iter__(self):
        yield from self.input_dp

    def __len__(self):
        return self.length
class MDP(MapDataPipe):
    """MapDataPipe wrapper delegating indexing to the underlying sequence."""

    def __init__(self, input_dp):
        super().__init__()
        self.input_dp = input_dp
        # Snapshot the length at construction time.
        self.length = len(input_dp)

    def __getitem__(self, index):
        # Delegate straight to the wrapped sequence.
        return self.input_dp[index]

    def __len__(self) -> int:
        return self.length
def _fake_fn(data, *args, **kwargs):
return data
def _fake_filter_fn(data, *args, **kwargs):
return data >= 5
def _worker_init_fn(worker_id):
random.seed(123)
class TestFunctionalIterDataPipe(TestCase):
# TODO(VitalyFedyunin): If dill installed this test fails
def _test_picklable(self):
arr = range(10)
picklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Map, IDP(arr), (), {}),
(dp.iter.Map, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
(dp.iter.Collate, IDP(arr), (), {}),
(dp.iter.Collate, IDP(arr), (_fake_fn, (0, ), {'test': True}), {}),
(dp.iter.Filter, IDP(arr), (_fake_filter_fn, (0, ), {'test': True}), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs)) # type: ignore[call-arg]
unpicklable_datapipes: List[Tuple[Type[IterDataPipe], IterDataPipe, Tuple, Dict[str, Any]]] = [
(dp.iter.Map, IDP(arr), (lambda x: x, ), {}),
(dp.iter.Collate, IDP(arr), (lambda x: x, ), {}),
(dp.iter.Filter, IDP(arr), (lambda x: x >= 5, ), {}),
]
for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
with warnings.catch_warnings(record=True) as wa:
datapipe = dpipe(input_dp, *dp_args, **dp_kwargs) # type: ignore[call-arg]
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
with self.assertRaises(AttributeError):
p = pickle.dumps(datapipe)
def test_concat_datapipe(self):
input_dp1 = IDP(range(10))
input_dp2 = IDP(range(5))
with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
dp.iter.Concat()
with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `IterDataPipe`"):
dp.iter.Concat(input_dp1, ()) # type: ignore[arg-type]
concat_dp = input_dp1.concat(input_dp2)
self.assertEqual(len(concat_dp), 15)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
# Test Reset
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
input_dp_nl = IDP_NoLen(range(5))
concat_dp = input_dp1.concat(input_dp_nl)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(concat_dp)
self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))
def test_map_datapipe(self):
input_dp = IDP(range(10))
def fn(item, dtype=torch.float, *, sum=False):
data = torch.tensor(item, dtype=dtype)
return data if not sum else data.sum()
map_dp = input_dp.map(fn)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
map_dp = input_dp.map(fn=fn, fn_args=(torch.int, ), fn_kwargs={'sum': True})
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
from functools import partial
map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, torch.tensor(y, dtype=torch.int).sum())
input_dp_nl = IDP_NoLen(range(10))
map_dp_nl = input_dp_nl.map()
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(map_dp_nl)
for x, y in zip(map_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y, dtype=torch.float))
# TODO(VitalyFedyunin): If dill installed this test fails
def _test_map_datapipe_nested_level(self):
input_dp = IDP([list(range(10)) for _ in range(3)])
def fn(item, *, dtype=torch.float):
return torch.tensor(item, dtype=dtype)
with warnings.catch_warnings(record=True) as wa:
map_dp = input_dp.map(lambda ls: ls * 2, nesting_level=0)
self.assertEqual(len(wa), 1)
self.assertRegex(str(wa[0].message), r"^Lambda function is not supported for pickle")
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(x, y * 2)
map_dp = input_dp.map(fn, nesting_level=1)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(len(x), len(y))
for a, b in zip(x, y):
self.assertEqual(a, torch.tensor(b, dtype=torch.float))
map_dp = input_dp.map(fn, nesting_level=-1)
self.assertEqual(len(input_dp), len(map_dp))
for x, y in zip(map_dp, input_dp):
self.assertEqual(len(x), len(y))
for a, b in zip(x, y):
self.assertEqual(a, torch.tensor(b, dtype=torch.float))
map_dp = input_dp.map(fn, nesting_level=4)
with self.assertRaises(IndexError):
list(map_dp)
with self.assertRaises(ValueError):
input_dp.map(fn, nesting_level=-2)
def test_collate_datapipe(self):
arrs = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
input_dp = IDP(arrs)
def _collate_fn(batch):
return torch.tensor(sum(batch), dtype=torch.float)
collate_dp = input_dp.collate(collate_fn=_collate_fn)
self.assertEqual(len(input_dp), len(collate_dp))
for x, y in zip(collate_dp, input_dp):
self.assertEqual(x, torch.tensor(sum(y), dtype=torch.float))
input_dp_nl = IDP_NoLen(arrs)
collate_dp_nl = input_dp_nl.collate()
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(collate_dp_nl)
for x, y in zip(collate_dp_nl, input_dp_nl):
self.assertEqual(x, torch.tensor(y))
def test_batch_datapipe(self):
arrs = list(range(10))
input_dp = IDP(arrs)
with self.assertRaises(AssertionError):
input_dp.batch(batch_size=0)
# Default not drop the last batch
bs = 3
batch_dp = input_dp.batch(batch_size=bs)
self.assertEqual(len(batch_dp), 4)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), 1 if i == 3 else bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
# Drop the last batch
bs = 4
batch_dp = input_dp.batch(batch_size=bs, drop_last=True)
self.assertEqual(len(batch_dp), 2)
for i, batch in enumerate(batch_dp):
self.assertEqual(len(batch), bs)
self.assertEqual(batch, arrs[i * bs: i * bs + len(batch)])
input_dp_nl = IDP_NoLen(range(10))
batch_dp_nl = input_dp_nl.batch(batch_size=2)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(batch_dp_nl)
def test_unbatch_datapipe(self):
target_length = 6
prebatch_dp = IDP(range(target_length))
input_dp = prebatch_dp.batch(3)
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[0, 1, 2], [3, 4, 5]])
unbatch_dp = input_dp.unbatch()
self.assertEqual(len(list(unbatch_dp)), target_length)
for i, res in zip(prebatch_dp, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[[0, 1], [2, 3]], [[4, 5], [6, 7]]])
unbatch_dp = input_dp.unbatch()
expected_dp = [[0, 1], [2, 3], [4, 5], [6, 7]]
self.assertEqual(len(list(unbatch_dp)), 4)
for i, res in zip(expected_dp, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=2)
expected_dp2 = [0, 1, 2, 3, 4, 5, 6, 7]
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
unbatch_dp = input_dp.unbatch(unbatch_level=-1)
self.assertEqual(len(list(unbatch_dp)), 8)
for i, res in zip(expected_dp2, unbatch_dp):
self.assertEqual(i, res)
input_dp = IDP([[0, 1, 2], [3, 4, 5]])
with self.assertRaises(ValueError):
unbatch_dp = input_dp.unbatch(unbatch_level=-2)
for i in unbatch_dp:
print(i)
with self.assertRaises(IndexError):
unbatch_dp = input_dp.unbatch(unbatch_level=5)
for i in unbatch_dp:
print(i)
def test_bucket_batch_datapipe(self):
input_dp = IDP(range(20))
with self.assertRaises(AssertionError):
dp.iter.BucketBatcher(input_dp, batch_size=0)
input_dp_nl = IDP_NoLen(range(20))
bucket_dp_nl = dp.iter.BucketBatcher(input_dp_nl, batch_size=7)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(bucket_dp_nl)
def _helper(**kwargs):
data_len = 100
arrs = list(range(data_len))
random.shuffle(arrs)
input_dp = IDP(arrs)
bucket_dp = dp.iter.BucketBatcher(input_dp, **kwargs)
self.assertEqual(len(bucket_dp), data_len // 3 if kwargs['drop_last'] else data_len // 3 + 1)
def _verify_bucket_sorted(bucket):
# Sort batch in a bucket
bucket = sorted(bucket, key=lambda x: x[0])
flat = [item for batch in bucket for item in batch]
# Elements in the bucket should be sorted
self.assertEqual(flat, sorted(flat))
batch_num = kwargs['batch_num'] if 'batch_num' in kwargs else 100
bucket = []
for idx, d in enumerate(bucket_dp):
self.assertEqual(d, sorted(d))
bucket.append(d)
if idx % batch_num == batch_num - 1:
_verify_bucket_sorted(bucket)
bucket = []
_verify_bucket_sorted(bucket)
def _sort_fn(data):
return sorted(data)
# In-batch shuffle
_helper(batch_size=3, drop_last=False, batch_num=5, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=False, batch_num=2, bucket_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, sort_key=_sort_fn)
_helper(batch_size=3, drop_last=True, batch_num=2, bucket_num=2, sort_key=_sort_fn)
def test_filter_datapipe(self):
input_ds = IDP(range(10))
def _filter_fn(data, val, clip=False):
if clip:
return data >= val
return True
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_args=(5, ))
for data, exp in zip(filter_dp, range(10)):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(filter_fn=_filter_fn, fn_kwargs={'val': 5, 'clip': True})
for data, exp in zip(filter_dp, range(5, 10)):
self.assertEqual(data, exp)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(filter_dp)
def _non_bool_fn(data):
return 1
filter_dp = input_ds.filter(filter_fn=_non_bool_fn)
with self.assertRaises(ValueError):
temp = list(filter_dp)
def test_filter_datapipe_nested_list(self):
input_ds = IDP(range(10)).batch(5)
def _filter_fn(data, val):
return data >= val
filter_dp = input_ds.filter(nesting_level=-1, filter_fn=_filter_fn, fn_kwargs={'val': 5})
expected_dp1 = [[5, 6, 7, 8, 9]]
self.assertEqual(len(list(filter_dp)), len(expected_dp1))
for data, exp in zip(filter_dp, expected_dp1):
self.assertEqual(data, exp)
filter_dp = input_ds.filter(nesting_level=-1, drop_empty_batches=False,
filter_fn=_filter_fn, fn_kwargs={'val': 5})
expected_dp2: List[List[int]] = [[], [5, 6, 7, 8, 9]]
self.assertEqual(len(list(filter_dp)), len(expected_dp2))
for data, exp in zip(filter_dp, expected_dp2):
self.assertEqual(data, exp)
with self.assertRaises(IndexError):
filter_dp = input_ds.filter(nesting_level=5, filter_fn=_filter_fn, fn_kwargs={'val': 5})
temp = list(filter_dp)
input_ds = IDP(range(10)).batch(3)
filter_dp = input_ds.filter(lambda ls: len(ls) >= 3)
expected_dp3: List[List[int]] = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
self.assertEqual(len(list(filter_dp)), len(expected_dp3))
for data, exp in zip(filter_dp, expected_dp3):
self.assertEqual(data, exp)
input_ds = IDP([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda x: x > 3, nesting_level=-1)
expected_dp4 = [[[4, 5]], [[6, 7, 8]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp4))
for data2, exp2 in zip(filter_dp, expected_dp4):
self.assertEqual(data2, exp2)
input_ds = IDP([[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda x: x > 7, nesting_level=-1)
expected_dp5 = [[[8]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp5))
for data2, exp2 in zip(filter_dp, expected_dp5):
self.assertEqual(data2, exp2)
input_ds = IDP([[[0, 1], [3, 4]], [[6, 7, 8], [1, 2, 3]]])
filter_dp = input_ds.filter(lambda ls: len(ls) >= 3, nesting_level=1)
expected_dp6 = [[[6, 7, 8], [1, 2, 3]]]
self.assertEqual(len(list(filter_dp)), len(expected_dp6))
for data2, exp2 in zip(filter_dp, expected_dp6):
self.assertEqual(data2, exp2)
def test_sampler_datapipe(self):
input_dp = IDP(range(10))
# Default SequentialSampler
sampled_dp = dp.iter.Sampler(input_dp) # type: ignore[var-annotated]
self.assertEqual(len(sampled_dp), 10)
for i, x in enumerate(sampled_dp):
self.assertEqual(x, i)
# RandomSampler
random_sampled_dp = dp.iter.Sampler(input_dp, sampler=RandomSampler, sampler_kwargs={'replacement': True}) # type: ignore[var-annotated] # noqa: B950
# Requires `__len__` to build SamplerDataPipe
input_dp_nolen = IDP_NoLen(range(10))
with self.assertRaises(AssertionError):
sampled_dp = dp.iter.Sampler(input_dp_nolen)
def test_shuffle_datapipe(self):
exp = list(range(20))
input_ds = IDP(exp)
with self.assertRaises(AssertionError):
shuffle_dp = input_ds.shuffle(buffer_size=0)
for bs in (5, 20, 25):
shuffle_dp = input_ds.shuffle(buffer_size=bs)
self.assertEqual(len(shuffle_dp), len(input_ds))
random.seed(123)
res = list(shuffle_dp)
self.assertEqual(sorted(res), exp)
# Test Deterministic
for num_workers in (0, 1):
random.seed(123)
dl = DataLoader(shuffle_dp, num_workers=num_workers, worker_init_fn=_worker_init_fn)
dl_res = list(dl)
self.assertEqual(res, dl_res)
shuffle_dp_nl = IDP_NoLen(range(20)).shuffle(buffer_size=5)
with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
len(shuffle_dp_nl)
@skipIfNoTorchVision
def test_transforms_datapipe(self):
    """legacy_transforms accepts torchvision transform objects (Compose,
    nn.Sequential, or a single transform), rejects plain Python callables,
    and preserves (or propagates the absence of) the source length."""
    torch.set_default_dtype(torch.float)
    # A sequence of numpy random numbers representing 3-channel images
    w = h = 32
    inputs = [np.random.randint(0, 255, (h, w, 3), dtype=np.uint8) for i in range(10)]
    tensor_inputs = [torch.tensor(x, dtype=torch.float).permute(2, 0, 1) / 255. for x in inputs]

    input_dp = IDP(inputs)
    # Raise TypeError for python function
    with self.assertRaisesRegex(TypeError, r"`transforms` are required to be"):
        input_dp.legacy_transforms(_fake_fn)

    # transforms.Compose of several transforms
    transforms = torchvision.transforms.Compose([
        torchvision.transforms.ToTensor(),
        torchvision.transforms.Pad(1, fill=1, padding_mode='constant'),
    ])
    tsfm_dp = input_dp.legacy_transforms(transforms)
    self.assertEqual(len(tsfm_dp), len(input_dp))
    # Pad adds a 1-pixel border, so compare the interior crop to the
    # un-padded tensor version of the input.
    for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
        self.assertEqual(tsfm_data[:, 1:(h + 1), 1:(w + 1)], input_data)

    # nn.Sequential of several transforms (required to be instances of nn.Module)
    input_dp = IDP(tensor_inputs)
    transforms = nn.Sequential(
        torchvision.transforms.Pad(1, fill=1, padding_mode='constant'),
    )
    tsfm_dp = input_dp.legacy_transforms(transforms)
    self.assertEqual(len(tsfm_dp), len(input_dp))
    for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
        self.assertEqual(tsfm_data[:, 1:(h + 1), 1:(w + 1)], input_data)

    # Single transform
    input_dp = IDP_NoLen(inputs)  # type: ignore[assignment]
    transform = torchvision.transforms.ToTensor()
    tsfm_dp = input_dp.legacy_transforms(transform)
    # Length-less source: len() raises but iteration still works.
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(tsfm_dp)
    for tsfm_data, input_data in zip(tsfm_dp, tensor_inputs):
        self.assertEqual(tsfm_data, input_data)
def test_zip_datapipe(self):
    """Zip requires datapipe inputs, truncates to the shortest input, and
    supports repeated iteration."""
    # Non-datapipe arguments are rejected outright.
    with self.assertRaises(TypeError):
        dp.iter.Zip(IDP(range(10)), list(range(10)))  # type: ignore[arg-type]
    expected = [(n, n) for n in range(5)]
    # Zipping with a length-less pipe: iteration works, len() does not.
    no_len_zip = dp.iter.Zip(IDP(range(10)), IDP_NoLen(range(5)))  # type: ignore[var-annotated]
    with self.assertRaisesRegex(TypeError, r"instance doesn't have valid length$"):
        len(no_len_zip)
    self.assertEqual(list(no_len_zip), expected)
    # Both sides sized: len() is the shorter input's length.
    sized_zip = dp.iter.Zip(IDP(range(10)), IDP(range(5)))
    self.assertEqual(len(sized_zip), 5)
    self.assertEqual(list(sized_zip), expected)
    # A second full pass (implicit reset) yields the same data.
    self.assertEqual(list(sized_zip), expected)
class TestFunctionalMapDataPipe(TestCase):
    """Tests for map-style (random-access) datapipes: pickling behavior,
    concatenation, and element-wise mapping."""

    # TODO(VitalyFedyunin): If dill installed this test fails
    def _test_picklable(self):
        # Pipes built from named functions (or no function) pickle cleanly;
        # lambda-based pipes warn at construction and fail to pickle.
        arr = range(10)
        picklable_datapipes: List[
            Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
        ] = [
            (dp.map.Map, MDP(arr), (), {}),
            (dp.map.Map, MDP(arr), (_fake_fn, (0,), {'test': True}), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in picklable_datapipes:
            p = pickle.dumps(dpipe(input_dp, *dp_args, **dp_kwargs))  # type: ignore[call-arg]

        unpicklable_datapipes: List[
            Tuple[Type[MapDataPipe], MapDataPipe, Tuple, Dict[str, Any]]
        ] = [
            (dp.map.Map, MDP(arr), (lambda x: x,), {}),
        ]
        for dpipe, input_dp, dp_args, dp_kwargs in unpicklable_datapipes:
            with warnings.catch_warnings(record=True) as wa:
                datapipe = dpipe(input_dp, *dp_args, **dp_kwargs)  # type: ignore[call-arg]
                self.assertEqual(len(wa), 1)
                self.assertRegex(
                    str(wa[0].message), r"^Lambda function is not supported for pickle"
                )
            with self.assertRaises(AttributeError):
                p = pickle.dumps(datapipe)

    def test_concat_datapipe(self):
        # Concat validates its inputs, then behaves like the concatenated
        # sequence for len(), indexing and iteration.
        input_dp1 = MDP(range(10))
        input_dp2 = MDP(range(5))
        with self.assertRaisesRegex(ValueError, r"Expected at least one DataPipe"):
            dp.map.Concat()
        with self.assertRaisesRegex(TypeError, r"Expected all inputs to be `MapDataPipe`"):
            dp.map.Concat(input_dp1, ())  # type: ignore[arg-type]
        concat_dp = input_dp1.concat(input_dp2)
        self.assertEqual(len(concat_dp), 15)
        for index in range(15):
            self.assertEqual(concat_dp[index], (list(range(10)) + list(range(5)))[index])
        self.assertEqual(list(concat_dp), list(range(10)) + list(range(5)))

    def test_map_datapipe(self):
        arr = range(10)
        input_dp = MDP(arr)

        def fn(item, dtype=torch.float, *, sum=False):
            data = torch.tensor(item, dtype=dtype)
            return data if not sum else data.sum()

        # Plain function: applied on indexing; length is preserved.
        map_dp = input_dp.map(fn)
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.float)
            )

        # Extra positional/keyword args forwarded via fn_args / fn_kwargs.
        map_dp = input_dp.map(fn=fn, fn_args=(torch.int,), fn_kwargs={'sum': True})
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
            )

        # functools.partial works just as well as fn_args / fn_kwargs.
        from functools import partial
        map_dp = input_dp.map(partial(fn, dtype=torch.int, sum=True))
        self.assertEqual(len(input_dp), len(map_dp))
        for index in arr:
            self.assertEqual(
                map_dp[index], torch.tensor(input_dp[index], dtype=torch.int).sum()
            )
# Metaclass conflict for Python 3.6
# Multiple inheritance with NamedTuple is not supported for Python 3.9
_generic_namedtuple_allowed = sys.version_info >= (3, 7) and sys.version_info < (3, 9)
if _generic_namedtuple_allowed:
    # A Generic NamedTuple is only definable on 3.7/3.8 (see comments above).
    # Used below (test_compile_time) to verify such types are rejected by
    # the datapipe typing checks.
    class InvalidData(Generic[T_co], NamedTuple):
        name: str
        data: T_co
class TestTyping(TestCase):
    """Tests for the datapipe typing machinery in torch.utils.data._typing:
    subtype / instance checks, class-creation-time annotation validation,
    construction-time argument validation, runtime validation, and type
    reinforcement."""

    def test_subtype(self):
        from torch.utils.data._typing import issubtype
        # Every type is a subtype of itself and of Any; only T_co accepts Any.
        basic_type = (int, str, bool, float, complex,
                      list, tuple, dict, set, T_co)
        for t in basic_type:
            self.assertTrue(issubtype(t, t))
            self.assertTrue(issubtype(t, Any))
            if t == T_co:
                self.assertTrue(issubtype(Any, t))
            else:
                self.assertFalse(issubtype(Any, t))
        for t1, t2 in itertools.product(basic_type, basic_type):
            if t1 == t2 or t2 == T_co:
                self.assertTrue(issubtype(t1, t2))
            else:
                self.assertFalse(issubtype(t1, t2))

        # Asymmetric sub/super pairs involving TypeVars, Unions and generics:
        # sub <= par must hold, par <= sub must not.
        T = TypeVar('T', int, str)
        S = TypeVar('S', bool, Union[str, int], Tuple[int, T])  # type: ignore[valid-type]
        types = ((int, Optional[int]),
                 (List, Union[int, list]),
                 (Tuple[int, str], S),
                 (Tuple[int, str], tuple),
                 (T, S),
                 (S, T_co),
                 (T, Union[S, Set]))
        for sub, par in types:
            self.assertTrue(issubtype(sub, par))
            self.assertFalse(issubtype(par, sub))

        # Subscripted generics preserve the relation element-wise; with
        # recursive=False only the outer container is compared.
        subscriptable_types = {
            List: 1,
            Tuple: 2,  # use 2 parameters
            Set: 1,
            Dict: 2,
        }
        for subscript_type, n in subscriptable_types.items():
            for ts in itertools.combinations(types, n):
                subs, pars = zip(*ts)
                sub = subscript_type[subs]  # type: ignore[index]
                par = subscript_type[pars]  # type: ignore[index]
                self.assertTrue(issubtype(sub, par))
                self.assertFalse(issubtype(par, sub))
                # Non-recursive check
                self.assertTrue(issubtype(par, sub, recursive=False))

    def test_issubinstance(self):
        from torch.utils.data._typing import issubinstance
        basic_data = (1, '1', True, 1., complex(1., 0.))
        basic_type = (int, str, bool, float, complex)
        S = TypeVar('S', bool, Union[str, int])
        for d in basic_data:
            self.assertTrue(issubinstance(d, Any))
            self.assertTrue(issubinstance(d, T_co))
            if type(d) in (bool, int, str):
                self.assertTrue(issubinstance(d, S))
            else:
                self.assertFalse(issubinstance(d, S))
            for t in basic_type:
                if type(d) == t:
                    self.assertTrue(issubinstance(d, t))
                else:
                    self.assertFalse(issubinstance(d, t))
        # list/set
        dt = (([1, '1', 2], List), (set({1, '1', 2}), Set))
        for d, t in dt:
            self.assertTrue(issubinstance(d, t))
            self.assertTrue(issubinstance(d, t[T_co]))  # type: ignore[index]
            self.assertFalse(issubinstance(d, t[int]))  # type: ignore[index]
        # dict
        d = dict({'1': 1, '2': 2.})
        self.assertTrue(issubinstance(d, Dict))
        self.assertTrue(issubinstance(d, Dict[str, T_co]))
        self.assertFalse(issubinstance(d, Dict[str, int]))
        # tuple
        d = (1, '1', 2)
        self.assertTrue(issubinstance(d, Tuple))
        self.assertTrue(issubinstance(d, Tuple[int, str, T_co]))
        self.assertFalse(issubinstance(d, Tuple[int, Any]))
        self.assertFalse(issubinstance(d, Tuple[int, int, int]))

    # Static checking annotation
    def test_compile_time(self):
        # Malformed __iter__ annotations are rejected at class-creation time.
        with self.assertRaisesRegex(TypeError, r"Expected 'Iterator' as the return"):
            class InvalidDP1(IterDataPipe[int]):
                def __iter__(self) -> str:  # type: ignore[misc, override]
                    yield 0

        with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
            class InvalidDP2(IterDataPipe[Tuple]):
                def __iter__(self) -> Iterator[int]:  # type: ignore[override]
                    yield 0

        with self.assertRaisesRegex(TypeError, r"Expected return type of '__iter__'"):
            class InvalidDP3(IterDataPipe[Tuple[int, str]]):
                def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                    yield (0, )

        if _generic_namedtuple_allowed:
            with self.assertRaisesRegex(TypeError, r"is not supported by Python typing"):
                class InvalidDP4(IterDataPipe["InvalidData[int]"]):  # type: ignore[type-arg, misc]
                    pass

        class DP1(IterDataPipe[Tuple[int, str]]):
            def __init__(self, length):
                self.length = length

            def __iter__(self) -> Iterator[Tuple[int, str]]:
                for d in range(self.length):
                    yield d, str(d)

        # Instances of the same parametrized datapipe share the same type.
        self.assertTrue(issubclass(DP1, IterDataPipe))
        dp1 = DP1(10)
        self.assertTrue(DP1.type.issubtype(dp1.type) and dp1.type.issubtype(DP1.type))
        dp2 = DP1(5)
        self.assertEqual(dp1.type, dp2.type)

        # An already-parametrized datapipe cannot be subscripted again.
        with self.assertRaisesRegex(TypeError, r"is not a generic class"):
            class InvalidDP5(DP1[tuple]):  # type: ignore[type-arg]
                def __iter__(self) -> Iterator[tuple]:  # type: ignore[override]
                    yield (0, )

        class DP2(IterDataPipe[T_co]):
            def __iter__(self) -> Iterator[T_co]:
                for d in range(10):
                    yield d  # type: ignore[misc]

        self.assertTrue(issubclass(DP2, IterDataPipe))
        dp1 = DP2()  # type: ignore[assignment]
        self.assertTrue(DP2.type.issubtype(dp1.type) and dp1.type.issubtype(DP2.type))
        dp2 = DP2()  # type: ignore[assignment]
        self.assertEqual(dp1.type, dp2.type)

        class DP3(IterDataPipe[Tuple[T_co, str]]):
            r""" DataPipe without fixed type with __init__ function"""
            def __init__(self, datasource):
                self.datasource = datasource

            def __iter__(self) -> Iterator[Tuple[T_co, str]]:
                for d in self.datasource:
                    yield d, str(d)

        self.assertTrue(issubclass(DP3, IterDataPipe))
        dp1 = DP3(range(10))  # type: ignore[assignment]
        self.assertTrue(DP3.type.issubtype(dp1.type) and dp1.type.issubtype(DP3.type))
        dp2 = DP3(5)  # type: ignore[assignment]
        self.assertEqual(dp1.type, dp2.type)

        class DP4(IterDataPipe[tuple]):
            r""" DataPipe without __iter__ annotation"""
            def __iter__(self):
                raise NotImplementedError

        self.assertTrue(issubclass(DP4, IterDataPipe))
        dp = DP4()
        self.assertTrue(dp.type.param == tuple)

        class DP5(IterDataPipe):
            r""" DataPipe without type annotation"""
            def __iter__(self) -> Iterator[str]:
                raise NotImplementedError

        # An unparametrized datapipe defaults to element type Any.
        self.assertTrue(issubclass(DP5, IterDataPipe))
        dp = DP5()  # type: ignore[assignment]
        from torch.utils.data._typing import issubtype
        self.assertTrue(issubtype(dp.type.param, Any) and issubtype(Any, dp.type.param))

        class DP6(IterDataPipe[int]):
            r""" DataPipe with plain Iterator"""
            def __iter__(self) -> Iterator:
                raise NotImplementedError

        self.assertTrue(issubclass(DP6, IterDataPipe))
        dp = DP6()  # type: ignore[assignment]
        self.assertTrue(dp.type.param == int)

        class DP7(IterDataPipe[Awaitable[T_co]]):
            r""" DataPipe with abstract base class"""

        # NOTE(review): this re-checks DP6 — probably intended to be DP7.
        self.assertTrue(issubclass(DP6, IterDataPipe))
        self.assertTrue(DP7.type.param == Awaitable[T_co])

        class DP8(DP7[str]):
            r""" DataPipe subclass from a DataPipe with abc type"""

        self.assertTrue(issubclass(DP8, IterDataPipe))
        self.assertTrue(DP8.type.param == Awaitable[str])

    def test_construct_time(self):
        # @argument_validation checks IterDataPipe-typed arguments at
        # construction time.
        class DP0(IterDataPipe[Tuple]):
            @argument_validation
            def __init__(self, dp: IterDataPipe):
                self.dp = dp

            def __iter__(self) -> Iterator[Tuple]:
                for d in self.dp:
                    yield d, str(d)

        class DP1(IterDataPipe[int]):
            @argument_validation
            def __init__(self, dp: IterDataPipe[Tuple[int, str]]):
                self.dp = dp

            def __iter__(self) -> Iterator[int]:
                for a, b in self.dp:
                    yield a

        # Non-DataPipe input with DataPipe hint
        datasource = [(1, '1'), (2, '2'), (3, '3')]
        with self.assertRaisesRegex(TypeError, r"Expected argument 'dp' as a IterDataPipe"):
            dp = DP0(datasource)

        # A datapipe whose element type does not match the annotated subtype.
        dp = DP0(IDP(range(10)))
        with self.assertRaisesRegex(TypeError, r"Expected type of argument 'dp' as a subtype"):
            dp = DP1(dp)

    def test_runtime(self):
        class DP(IterDataPipe[Tuple[int, T_co]]):
            def __init__(self, datasource):
                self.ds = datasource

            @runtime_validation
            def __iter__(self) -> Iterator[Tuple[int, T_co]]:
                for d in self.ds:
                    yield d

        # Data matching the declared element type passes validation.
        dss = ([(1, '1'), (2, '2')],
               [(1, 1), (2, '2')])
        for ds in dss:
            dp = DP(ds)  # type: ignore[var-annotated]
            self.assertEqual(list(dp), ds)
            # Reset __iter__
            self.assertEqual(list(dp), ds)

        # Mismatching data raises at iteration time unless validation is
        # disabled via the (nestable) context manager.
        dss = ([(1, 1), ('2', 2)],  # type: ignore[assignment, list-item]
               [[1, '1'], [2, '2']],  # type: ignore[list-item]
               [1, '1', 2, '2'])
        for ds in dss:
            dp = DP(ds)
            with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
                list(dp)
            with runtime_validation_disabled():
                self.assertEqual(list(dp), ds)
                with runtime_validation_disabled():
                    self.assertEqual(list(dp), ds)
            with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
                list(dp)

    def test_reinforce(self):
        T = TypeVar('T', int, str)

        class DP(IterDataPipe[T]):
            def __init__(self, ds):
                self.ds = ds

            @runtime_validation
            def __iter__(self) -> Iterator[T]:
                for d in self.ds:
                    yield d

        ds = list(range(10))
        # Valid type reinforcement
        dp = DP(ds).reinforce_type(int)
        self.assertTrue(dp.type, int)
        self.assertEqual(list(dp), ds)

        # Invalid type
        with self.assertRaisesRegex(TypeError, r"'expected_type' must be a type"):
            dp = DP(ds).reinforce_type(1)

        # Type is not subtype
        with self.assertRaisesRegex(TypeError, r"Expected 'expected_type' as subtype of"):
            dp = DP(ds).reinforce_type(float)

        # Invalid data at runtime
        dp = DP(ds).reinforce_type(str)
        with self.assertRaisesRegex(RuntimeError, r"Expected an instance as subtype"):
            list(dp)

        # Context Manager to disable the runtime validation
        with runtime_validation_disabled():
            self.assertEqual(list(d for d in dp), ds)
class NumbersDataset(IterDataPipe):
    """An iterable datapipe that yields the integers 0 .. size-1 in order."""

    def __init__(self, size=10):
        self.size = size

    def __iter__(self):
        return iter(range(self.size))
class TestGraph(TestCase):
    """Tests for torch.utils.data.graph.traverse: it should reconstruct
    the datapipe graph as a nested dict mapping each pipe to its
    predecessors."""

    @skipIfNoDill
    def test_simple_traverse(self):
        numbers_dp = NumbersDataset(size=50)
        mapped_dp = numbers_dp.map(lambda x: x * 10)
        graph = torch.utils.data.graph.traverse(mapped_dp)
        expected: Dict[Any, Any] = {mapped_dp: {numbers_dp: {}}}
        self.assertEqual(expected, graph)

    # TODO(VitalyFedyunin): This test is incorrect because of 'buffer' nature
    # of the fork fake implementation, update fork first and fix this test too
    @skipIfNoDill
    def test_traverse_forked(self):
        numbers_dp = NumbersDataset(size=50)
        dp0, dp1, dp2 = numbers_dp.fork(3)
        dp0_upd = dp0.map(lambda x: x * 10)
        dp1_upd = dp1.filter(lambda x: x % 3 == 1)
        combined_dp = dp0_upd.mux(dp1_upd, dp2)
        graph = torch.utils.data.graph.traverse(combined_dp)
        # NOTE(review): the forked branches appear without their common
        # source here — see the TODO above.
        expected = {combined_dp: {dp0_upd: {dp0: {}}, dp1_upd: {dp1: {}}, dp2: {}}}
        self.assertEqual(expected, graph)
class TestSharding(TestCase):
    """Tests for sharding_filter + apply_sharding: each shard sees a
    distinct slice of the pipeline, and all shards together cover every
    item exactly once."""

    def _get_pipeline(self):
        # Small forked pipeline: one branch scaled x10, one filtered to
        # x % 3 == 1, then multiplexed back together.
        numbers_dp = NumbersDataset(size=10)
        dp0, dp1 = numbers_dp.fork(2)
        dp0_upd = dp0.map(lambda x: x * 10)
        dp1_upd = dp1.filter(lambda x: x % 3 == 1)
        combined_dp = dp0_upd.mux(dp1_upd)
        return combined_dp

    @skipIfNoDill
    def test_simple_sharding(self):
        sharded_dp = self._get_pipeline().sharding_filter()
        torch.utils.data.sharding.apply_sharding(sharded_dp, 3, 1)
        items = list(sharded_dp)
        self.assertEqual([1, 20, 40, 70], items)

        # The union of all three shards equals the unsharded output.
        all_items = list(self._get_pipeline())
        items = []
        for i in range(3):
            sharded_dp = self._get_pipeline().sharding_filter()
            torch.utils.data.sharding.apply_sharding(sharded_dp, 3, i)
            items += list(sharded_dp)
        self.assertEqual(sorted(all_items), sorted(items))

    @skipIfNoDill
    def test_old_dataloader(self):
        # A sharded pipeline fed through the classic DataLoader with the
        # backward-compatibility worker_init_fn still yields every item.
        dp = self._get_pipeline()
        expected = list(dp)

        dp = self._get_pipeline().sharding_filter()
        dl = DataLoader(dp, batch_size=1, shuffle=False, num_workers=2,
                        worker_init_fn=torch.utils.data.backward_compatibility.worker_init_fn)
        items = []
        for i in dl:
            items.append(i)

        self.assertEqual(sorted(expected), sorted(items))
# Run the datapipe test suite when this file is executed as a script.
if __name__ == '__main__':
    run_tests()
|
DistributedEngine.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# author: Irving He
# email: 1910646@tongji.edu.cn
"""
分布式架构
"""
import copy
from datetime import datetime
from multiprocessing import set_start_method
import torch.multiprocessing as torch_mp
import multiprocessing as mp
import queue
from time import sleep
import os
from Networks import PolicyNetwork
from Networks import C51ValueNetwork
from utils import Logger
from utils import empty_torch_queue
from utils import create_replay_buffer
from AgentWorker import Agent
from D4PG_LearnerWorker import D4PGLearnerWorker as LearnerD4PG
class DistributedEngine(object):
    """Distributed D4PG training architecture.

    Wires up the sampler process, the learner process, one exploitation
    agent and `num_agents` exploration agents, plus the queues and shared
    counters that connect them.
    """

    def __init__(self,config):
        # config: experiment configuration dict (env, model, queue sizes,
        # network dimensions, device placement, output paths, ...).
        self.config = config

    def train(self):
        """Spawn all worker processes, start them, and block until they
        all finish."""
        config = self.config
        batch_queue_size = config['batch_queue_size']
        n_agents = config['num_agents']  # Nums of Actors

        # Create the experiment output directory.
        experiment_dir = f"{config['results_path']}/{config['env']}-{config['model']}-{datetime.now():%Y-%m-%d_%H%M%S}"
        if not os.path.exists(experiment_dir):
            os.makedirs(experiment_dir,exist_ok=True)

        # Inter-process data structures.
        Processes = []
        replay_queue = mp.Queue(maxsize=config['replay_queue_size'])  # FIFO QUEUE
        learner_w_queue = torch_mp.Queue(maxsize=n_agents)
        replay_priorities_queue = mp.Queue(maxsize=config['replay_queue_size'])

        # Shared state variables across processes.
        training_on = mp.Value('i',1)
        update_step = mp.Value('i',0)
        global_episode = mp.Value('i',0)  # shared episode counter

        # Data sampler process.
        # (1)
        batch_queue = mp.Queue(maxsize=batch_queue_size)
        p = torch_mp.Process(target=sampler_worker,
                             args=(config, replay_queue, batch_queue, replay_priorities_queue, training_on,
                                   global_episode, update_step, experiment_dir))
        Processes.append(p)

        # Learner process.
        # (2)
        policy_net = PolicyNetwork(config['state_dim'], config['action_dim'],
                                   config['dense_size'], device=config['device'])
        target_policy_net = copy.deepcopy(policy_net)
        policy_net_cpu = PolicyNetwork(config['state_dim'], config['action_dim'],
                                       config['dense_size'], device=config['agent_device'])
        # Tensors must be moved to shared memory before being handed to
        # child processes / put on a multiprocessing.Queue.
        target_policy_net.share_memory()
        p = torch_mp.Process(target=learner_worker, args=(config, training_on, policy_net, target_policy_net, learner_w_queue,
                                                          replay_priorities_queue, batch_queue, update_step, experiment_dir))
        Processes.append(p)

        # Single agent for exploitation (greedy evaluation).
        # (3)
        p = torch_mp.Process(target=agent_worker,
                             args=(config, target_policy_net, None, global_episode, 0, "exploitation", experiment_dir,
                                   training_on, replay_queue, update_step))
        Processes.append(p)

        # (4,5,6,7)
        # Exploration agent processes.
        for i in range(n_agents):
            p = torch_mp.Process(target=agent_worker,
                                 args=(config, copy.deepcopy(policy_net_cpu), learner_w_queue, global_episode,
                                       i+1, "exploration", experiment_dir, training_on, replay_queue, update_step))
            Processes.append(p)

        for p in Processes:
            p.start()
        for p in Processes:
            p.join()

        print("-----[End...]----- ")
"""[Sampling worker]"""
def sampler_worker(config, replay_queue, batch_queue, replay_priorities_queue, training_on,
                   global_episode, update_step, log_dir=''):
    """Shuttle data between the actors, the replay buffer and the learner.

    Loops while the shared flag `training_on` is set:
      1. drain `replay_queue` (transitions from actors) into the replay buffer,
      2. apply priority updates coming back from the learner,
      3. push sampled batches into `batch_queue` for the learner.

    Args:
        config: experiment configuration dict.
        replay_queue: mp.Queue of transitions (s, a, r, s', d) from actors.
        batch_queue: mp.Queue of sampled training batches for the learner.
        replay_priorities_queue: mp.Queue of (indices, weights) updates.
        training_on: shared mp.Value flag; the loop exits when it becomes 0.
        global_episode: shared episode counter (logged only).
        update_step: shared learner step counter (used as the log x-axis).
        log_dir: directory for scalar summaries.
    """
    batch_size = config['batch_size']
    logger = Logger(f"{log_dir}/data_struct")

    # Create replay buffer
    replay_buffer = create_replay_buffer(config)

    while training_on.value:  # shared flag
        # Step 1: move incoming replays into the global buffer.
        # Diagram: [replay_queue] --(s,a,r,s',d)--> [replay_buffer]
        n = replay_queue.qsize()
        for _ in range(n):
            replay = replay_queue.get()
            replay_buffer.add(*replay)

        # Step 2/3 only run once a full batch is available.
        if len(replay_buffer) < batch_size:
            continue

        # Diagram: [batch_queue] --(transition priority)--> [replay_priority_queue]
        try:
            # get_nowait(): remove and return an item without blocking.
            inds, weights = replay_priorities_queue.get_nowait()
            replay_buffer.update_priorities(inds, weights)
        except queue.Empty:
            pass

        # Diagram: [replay_buffer] --(batch sample)--> [batch_queue]
        try:
            batch = replay_buffer.sample(batch_size)
            batch_queue.put_nowait(batch)
        except Exception:
            # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # can still terminate the process. Covers queue.Full from
            # put_nowait (and any sampling error) with a short back-off.
            sleep(0.1)
            continue

        # Log data-structure sizes.
        step = update_step.value
        logger.scalar_summary("data_struct/global_episode", global_episode.value, step)
        logger.scalar_summary("data_struct/replay_queue", replay_queue.qsize(), step)
        logger.scalar_summary("data_struct/batch_queue", batch_queue.qsize(), step)
        logger.scalar_summary("data_struct/replay_buffer", len(replay_buffer), step)

    if config['save_buffer_on_disk']:
        replay_buffer.dump(config['results_path'])

    empty_torch_queue(batch_queue)
    print("-----[Stop Sampler worker...]-----")
"""[Learner Worker]"""
def learner_worker(config,training_on,policy,target_policy_net,learner_w_queue, replay_priority_queue,batch_queue,update_step,experiment_dir):
    # Build the D4PG learner around the given policy / target networks and
    # run its training loop until the shared `training_on` flag is cleared.
    learner = LearnerD4PG(config, policy, target_policy_net, learner_w_queue, log_dir=experiment_dir)
    learner.run(training_on, batch_queue, replay_priority_queue, update_step)
"""[Evaluation Worker(no noise)] & [Exploration Worker]"""
def agent_worker(config, policy, learner_w_queue, global_episode, i, agent_type,
                 experiment_dir, training_on, replay_queue, update_step):
    # Run a single actor process. `agent_type` is "exploitation" (greedy
    # evaluation, learner_w_queue passed as None by the caller) or
    # "exploration" (noisy rollouts feeding replay_queue).
    agent = Agent(config,
                  policy=policy,
                  global_episode=global_episode,
                  n_agent=i,
                  agent_type=agent_type,
                  log_dir=experiment_dir)
    agent.run(training_on, replay_queue, learner_w_queue, update_step)
def load_engine(config):
    """Instantiate the training engine selected by config['model'].

    Returns a DistributedEngine for "d4pg"; other model names currently
    yield None (ddpg/d3pg are recognized but not implemented).
    """
    model_name = config["model"]
    print(f"Loading {config['model']} for {config['env']}.")
    if model_name == "d4pg":
        return DistributedEngine(config)
    # Recognized but unimplemented back-ends fall through to None.
    if model_name in ("ddpg", "d3pg"):
        pass
# 测试进程共享变量
# def func1(a,arr):
# a.value = 3.14
# for i in range(len(arr)):
# arr[i] = -arr[i]
# if __name__ == "__main__":
# # Value Array 是通过共享内存的方式共享数据
# # Manager是通过共享进程的方式共享数据
# # import multiprocessing
# # num = multiprocessing.Value('d',1.0)
# # arr = multiprocessing.Array('i',range(10))
# # p = multiprocessing.Process(target=func1,args=(num,arr))
# # p.start()
# # p.join()
# # print(num.value) # 3.14
# # print(arr[:]) # [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]
# pass |
notebookapp.py | # coding: utf-8
"""A tornado based IPython notebook server."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import base64
import datetime
import errno
import importlib
import io
import json
import logging
import os
import random
import re
import select
import signal
import socket
import ssl
import sys
import threading
import webbrowser
# check for pyzmq
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('13', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado >= 4.0 (matches the ImportError message below)
msg = "The IPython Notebook requires tornado >= 4.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (4,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
from tornado.log import LogFormatter, app_log, access_log, gen_log
from IPython.html import (
DEFAULT_STATIC_FILES_PATH,
DEFAULT_TEMPLATE_PATH_LIST,
)
from .base.handlers import Template404
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.config import ConfigManager
from .services.contents.manager import ContentsManager
from .services.contents.filemanager import FileContentsManager
from .services.clusters.clustermanager import ClusterManager
from .services.sessions.sessionmanager import SessionManager
from .auth.login import LoginHandler
from .auth.logout import LogoutHandler
from .base.handlers import IPythonHandler, FileFindHandler
from IPython.config import Config
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import (
BaseIPythonApplication, base_flags, base_aliases,
)
from IPython.core.profiledir import ProfileDir
from IPython.kernel import KernelManager
from IPython.kernel.kernelspec import KernelSpecManager
from IPython.kernel.zmq.session import Session
from IPython.nbformat.sign import NotebookNotary
from IPython.utils.importstring import import_item
from IPython.utils import submodule
from IPython.utils.process import check_pid
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes, Instance,
TraitError, Type,
)
from IPython.utils import py3compat
from IPython.utils.path import filefind, get_ipython_dir
from IPython.utils.sysinfo import get_sys_info
from .nbextensions import SYSTEM_NBEXTENSIONS_DIRS
from .utils import url_path_join
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
    """Generate n candidate ports near *port*.

    The first min(5, n) ports are sequential (port, port+1, ...); any
    remaining n-5 are drawn uniformly from [port - 2*n, port + 2*n],
    clamped below at 1.
    """
    sequential = min(5, n)
    for offset in range(sequential):
        yield port + offset
    spread = 2 * n
    for _ in range(n - 5):
        yield max(1, port + random.randint(-spread, spread))
def load_handlers(name):
    """Load the (URL pattern, handler) tuples for each component.

    Args:
        name: dotted module path relative to the ``IPython.html`` package
            (e.g. ``'tree.handlers'``).

    Returns:
        The module's ``default_handlers`` list.
    """
    name = 'IPython.html.' + name
    # importlib.import_module is the documented replacement for
    # __import__(name, fromlist=[...]) when the leaf module is wanted;
    # importlib is already imported at the top of this file.
    mod = importlib.import_module(name)
    return mod.default_handlers
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
    """The Tornado web application behind the notebook server.

    Assembles the tornado ``settings`` dict and the URL handler table from
    the managers and configuration owned by the NotebookApp instance.
    """

    def __init__(self, ipython_app, kernel_manager, contents_manager,
                 cluster_manager, session_manager, kernel_spec_manager,
                 config_manager, log,
                 base_url, default_url, settings_overrides, jinja_env_options):
        settings = self.init_settings(
            ipython_app, kernel_manager, contents_manager, cluster_manager,
            session_manager, kernel_spec_manager, config_manager, log, base_url,
            default_url, settings_overrides, jinja_env_options)
        handlers = self.init_handlers(settings)

        super(NotebookWebApplication, self).__init__(handlers, **settings)

    def init_settings(self, ipython_app, kernel_manager, contents_manager,
                      cluster_manager, session_manager, kernel_spec_manager,
                      config_manager,
                      log, base_url, default_url, settings_overrides,
                      jinja_env_options=None):
        """Build the tornado settings dict.

        Anything in `settings_overrides` wins over the values computed here
        (it is applied last via settings.update).
        """
        # template_path may be overridden; normalize to a list of
        # user-expanded paths for the jinja FileSystemLoader.
        _template_path = settings_overrides.get(
            "template_path",
            ipython_app.template_file_path,
        )
        if isinstance(_template_path, py3compat.string_types):
            _template_path = (_template_path,)
        template_path = [os.path.expanduser(path) for path in _template_path]

        jenv_opt = jinja_env_options if jinja_env_options else {}
        env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)

        sys_info = get_sys_info()
        if sys_info['commit_source'] == 'repository':
            # don't cache (rely on 304) when working from master
            version_hash = ''
        else:
            # reset the cache on server restart
            version_hash = datetime.datetime.now().strftime("%Y%m%d%H%M%S")

        settings = dict(
            # basics
            log_function=log_request,
            base_url=base_url,
            default_url=default_url,
            template_path=template_path,
            static_path=ipython_app.static_file_path,
            static_handler_class = FileFindHandler,
            static_url_prefix = url_path_join(base_url,'/static/'),
            static_handler_args = {
                # don't cache custom.js
                'no_cache_paths': [url_path_join(base_url, 'static', 'custom')],
            },
            version_hash=version_hash,

            # authentication
            cookie_secret=ipython_app.cookie_secret,
            login_url=url_path_join(base_url,'/login'),
            login_handler_class=ipython_app.login_handler_class,
            logout_handler_class=ipython_app.logout_handler_class,
            password=ipython_app.password,

            # managers
            kernel_manager=kernel_manager,
            contents_manager=contents_manager,
            cluster_manager=cluster_manager,
            session_manager=session_manager,
            kernel_spec_manager=kernel_spec_manager,
            config_manager=config_manager,

            # IPython stuff
            jinja_template_vars=ipython_app.jinja_template_vars,
            nbextensions_path=ipython_app.nbextensions_path,
            websocket_url=ipython_app.websocket_url,
            mathjax_url=ipython_app.mathjax_url,
            config=ipython_app.config,
            jinja2_env=env,
            terminals_available=False,  # Set later if terminals are available
        )

        # allow custom overrides for the tornado web app.
        settings.update(settings_overrides)
        return settings

    def init_handlers(self, settings):
        """Load the (URL pattern, handler) tuples for each component."""
        # Order matters. The first handler to match the URL will handle the request.
        handlers = []
        handlers.extend(load_handlers('tree.handlers'))
        handlers.extend([(r"/login", settings['login_handler_class'])])
        handlers.extend([(r"/logout", settings['logout_handler_class'])])
        handlers.extend(load_handlers('files.handlers'))
        handlers.extend(load_handlers('notebook.handlers'))
        handlers.extend(load_handlers('nbconvert.handlers'))
        handlers.extend(load_handlers('kernelspecs.handlers'))
        handlers.extend(load_handlers('edit.handlers'))
        handlers.extend(load_handlers('services.config.handlers'))
        handlers.extend(load_handlers('services.kernels.handlers'))
        handlers.extend(load_handlers('services.contents.handlers'))
        handlers.extend(load_handlers('services.clusters.handlers'))
        handlers.extend(load_handlers('services.sessions.handlers'))
        handlers.extend(load_handlers('services.nbconvert.handlers'))
        handlers.extend(load_handlers('services.kernelspecs.handlers'))
        handlers.extend(load_handlers('services.security.handlers'))
        # Serve /nbextensions uncached so extension edits show up on reload.
        handlers.append(
            (r"/nbextensions/(.*)", FileFindHandler, {
                'path': settings['nbextensions_path'],
                'no_cache_paths': ['/'],  # don't cache anything in nbextensions
            }),
        )
        # register base handlers last
        handlers.extend(load_handlers('base.handlers'))
        # set the URL that will be redirected from `/`
        handlers.append(
            (r'/?', web.RedirectHandler, {
                'url' : settings['default_url'],
                'permanent': False,  # want 302, not 301
            })
        )

        # prepend base_url onto the patterns that we match
        new_handlers = []
        for handler in handlers:
            pattern = url_path_join(settings['base_url'], handler[0])
            new_handler = tuple([pattern] + list(handler[1:]))
            new_handlers.append(new_handler)
        # add 404 on the end, which will catch everything that falls through
        new_handlers.append((r'(.*)', Template404))
        return new_handlers
class NbserverListApp(BaseIPythonApplication):
    """Subcommand (`ipython notebook list`) that prints the notebook
    servers currently running in this profile."""

    description="List currently running notebook servers in this profile."

    flags = dict(
        json=({'NbserverListApp': {'json': True}},
              "Produce machine-readable JSON output."),
    )

    # When True, emit one JSON object per server instead of a human-readable line.
    json = Bool(False, config=True,
          help="If True, each line of output will be a JSON object with the "
               "details from the server info file.")

    def start(self):
        # list_running_servers (defined elsewhere in this module) reads the
        # per-server info files for this profile.
        if not self.json:
            print("Currently running servers:")
        for serverinfo in list_running_servers(self.profile):
            if self.json:
                print(json.dumps(serverinfo))
            else:
                print(serverinfo['url'], "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(base_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['pylab']=(
{'NotebookApp' : {'pylab' : 'warn'}},
"DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileContentsManager.save_script',
'DEPRECATED, IGNORED',
'DEPRECATED, IGNORED'))
aliases = dict(base_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
'pylab': 'NotebookApp.pylab',
})
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
    # Traitlets application for the HTML notebook server: wires together the
    # kernel/contents/session/cluster managers, builds the tornado web app,
    # and runs the IO loop.
    name = 'ipython-notebook'
    description = """
        The IPython HTML Notebook.
        This launches a Tornado based HTML Notebook Server that serves up an
        HTML5/Javascript Notebook client.
    """
    examples = _examples
    aliases = aliases
    flags = flags
    # Configurable classes whose config options are exposed via this app.
    classes = [
        KernelManager, ProfileDir, Session, MappingKernelManager,
        ContentsManager, FileContentsManager, NotebookNotary,
        KernelSpecManager,
    ]
    # NOTE(review): flags/aliases are re-assigned here as Dict traits,
    # shadowing the plain assignments a few lines above.
    flags = Dict(flags)
    aliases = Dict(aliases)
    subcommands = dict(
        list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
    )
    # Extra argv passed to every IPython kernel launched by this server.
    ipython_kernel_argv = List(Unicode)
    _log_formatter_cls = LogFormatter
    def _log_level_default(self):
        # Default to INFO so startup/shutdown messages are visible.
        return logging.INFO
    def _log_datefmt_default(self):
        """Exclude date from default date format"""
        return "%H:%M:%S"
    def _log_format_default(self):
        """override default log format to include time"""
        return u"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s]%(end_color)s %(message)s"
    # create requested profiles by default, if they don't exist:
    auto_create = Bool(True)
    # file to be opened in the notebook server
    file_to_run = Unicode('', config=True)
    # Network related information
    allow_origin = Unicode('', config=True,
        help="""Set the Access-Control-Allow-Origin header
        Use '*' to allow any origin to access your server.
        Takes precedence over allow_origin_pat.
        """
    )
    allow_origin_pat = Unicode('', config=True,
        help="""Use a regular expression for the Access-Control-Allow-Origin header
        Requests from an origin matching the expression will get replies with:
        Access-Control-Allow-Origin: origin
        where `origin` is the origin of the request.
        Ignored if allow_origin is set.
        """
    )
    allow_credentials = Bool(False, config=True,
        help="Set the Access-Control-Allow-Credentials: true header"
    )
    default_url = Unicode('/tree', config=True,
        help="The default URL to redirect to from `/`"
    )
    ip = Unicode('localhost', config=True,
        help="The IP address the notebook server will listen on."
    )
    def _ip_default(self):
        """Return localhost if available, 127.0.0.1 otherwise.
        On some (horribly broken) systems, localhost cannot be bound.
        """
        # Probe by actually binding a throwaway socket to localhost.
        s = socket.socket()
        try:
            s.bind(('localhost', 0))
        except socket.error as e:
            self.log.warn("Cannot bind to localhost, using 127.0.0.1 as default ip\n%s", e)
            return '127.0.0.1'
        else:
            s.close()
            return 'localhost'
    def _ip_changed(self, name, old, new):
        # '*' is shorthand for "listen on all interfaces" (empty string).
        if new == u'*': self.ip = u''
    port = Integer(8888, config=True,
        help="The port the notebook server will listen on."
    )
    port_retries = Integer(50, config=True,
        help="The number of additional ports to try if the specified port is not available."
    )
    certfile = Unicode(u'', config=True,
        help="""The full path to an SSL/TLS certificate file."""
    )
    keyfile = Unicode(u'', config=True,
        help="""The full path to a private key file for usage with SSL/TLS."""
    )
    cookie_secret_file = Unicode(config=True,
        help="""The file where the cookie secret is stored."""
    )
    def _cookie_secret_file_default(self):
        # Profile dir may not be set yet during early config resolution.
        if self.profile_dir is None:
            return ''
        return os.path.join(self.profile_dir.security_dir, 'notebook_cookie_secret')
    cookie_secret = Bytes(b'', config=True,
        help="""The random bytes used to secure cookies.
        By default this is a new random number every time you start the Notebook.
        Set it to a value in a config file to enable logins to persist across server sessions.
        Note: Cookie secrets should be kept private, do not share config files with
        cookie_secret stored in plaintext (you can read the value from a file).
        """
    )
    def _cookie_secret_default(self):
        # Reuse the persisted secret if present; otherwise generate and save one.
        if os.path.exists(self.cookie_secret_file):
            with io.open(self.cookie_secret_file, 'rb') as f:
                return f.read()
        else:
            # NOTE(review): base64.encodestring is a deprecated alias
            # (removed in Python 3.9); fine for the Python 2-era code here.
            secret = base64.encodestring(os.urandom(1024))
            self._write_cookie_secret_file(secret)
            return secret
    def _write_cookie_secret_file(self, secret):
        """write my secret to my secret_file"""
        self.log.info("Writing notebook server cookie secret to %s", self.cookie_secret_file)
        with io.open(self.cookie_secret_file, 'wb') as f:
            f.write(secret)
        try:
            # Restrict to owner read/write; the secret must stay private.
            os.chmod(self.cookie_secret_file, 0o600)
        except OSError:
            self.log.warn(
                "Could not set permissions on %s",
                self.cookie_secret_file
            )
    password = Unicode(u'', config=True,
        help="""Hashed password to use for web authentication.
        To generate, type in a python/IPython shell:
        from IPython.lib import passwd; passwd()
        The string should be of the form type:salt:hashed-password.
        """
    )
    open_browser = Bool(True, config=True,
        help="""Whether to open in a browser after starting.
        The specific browser used is platform dependent and
        determined by the python standard library `webbrowser`
        module, unless it is overridden using the --browser
        (NotebookApp.browser) configuration option.
        """)
    browser = Unicode(u'', config=True,
        help="""Specify what command to use to invoke a web
        browser when opening the notebook. If not specified, the
        default browser will be determined by the `webbrowser`
        standard library module, which allows setting of the
        BROWSER environment variable to override it.
        """)
    webapp_settings = Dict(config=True,
        help="DEPRECATED, use tornado_settings"
    )
    def _webapp_settings_changed(self, name, old, new):
        # Forward the deprecated trait to its replacement.
        self.log.warn("\n    webapp_settings is deprecated, use tornado_settings.\n")
        self.tornado_settings = new
    tornado_settings = Dict(config=True,
        help="Supply overrides for the tornado.web.Application that the "
             "IPython notebook uses.")
    ssl_options = Dict(config=True,
        help="""Supply SSL options for the tornado HTTPServer.
        See the tornado docs for details.""")
    jinja_environment_options = Dict(config=True,
        help="Supply extra arguments that will be passed to Jinja environment.")
    jinja_template_vars = Dict(
        config=True,
        help="Extra variables to supply to jinja templates when rendering.",
    )
    enable_mathjax = Bool(True, config=True,
        help="""Whether to enable MathJax for typesetting math/TeX
        MathJax is the javascript library IPython uses to render math/LaTeX. It is
        very large, so you may want to disable it if you have a slow internet
        connection, or for offline use of the notebook.
        When disabled, equations etc. will appear as their untransformed TeX source.
        """
    )
    def _enable_mathjax_changed(self, name, old, new):
        """set mathjax url to empty if mathjax is disabled"""
        if not new:
            self.mathjax_url = u''
    base_url = Unicode('/', config=True,
        help='''The base URL for the notebook server.
        Leading and trailing slashes can be omitted,
        and will automatically be added.
        ''')
    def _base_url_changed(self, name, old, new):
        # Normalize: guarantee exactly one leading and one trailing slash.
        if not new.startswith('/'):
            self.base_url = '/'+new
        elif not new.endswith('/'):
            self.base_url = new+'/'
    base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
    def _base_project_url_changed(self, name, old, new):
        self.log.warn("base_project_url is deprecated, use base_url")
        self.base_url = new
    extra_static_paths = List(Unicode, config=True,
        help="""Extra paths to search for serving static files.
        This allows adding javascript/css to be available from the notebook server machine,
        or overriding individual files in the IPython"""
    )
    def _extra_static_paths_default(self):
        return [os.path.join(self.profile_dir.location, 'static')]
    @property
    def static_file_path(self):
        """return extra paths + the default location"""
        return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
    extra_template_paths = List(Unicode, config=True,
        help="""Extra paths to search for serving jinja templates.
        Can be used to override templates from IPython.html.templates."""
    )
    def _extra_template_paths_default(self):
        return []
    @property
    def template_file_path(self):
        """return extra paths + the default locations"""
        return self.extra_template_paths + DEFAULT_TEMPLATE_PATH_LIST
    extra_nbextensions_path = List(Unicode, config=True,
        help="""extra paths to look for Javascript notebook extensions"""
    )
    @property
    def nbextensions_path(self):
        """The path to look for Javascript notebook extensions"""
        return self.extra_nbextensions_path + [os.path.join(get_ipython_dir(), 'nbextensions')] + SYSTEM_NBEXTENSIONS_DIRS
    websocket_url = Unicode("", config=True,
        help="""The base URL for websockets,
        if it differs from the HTTP server (hint: it almost certainly doesn't).
        Should be in the form of an HTTP origin: ws[s]://hostname[:port]
        """
    )
    mathjax_url = Unicode("", config=True,
        help="""The url for MathJax.js."""
    )
    def _mathjax_url_default(self):
        # Prefer a locally-installed MathJax; fall back to the CDN.
        if not self.enable_mathjax:
            return u''
        static_url_prefix = self.tornado_settings.get("static_url_prefix",
                         url_path_join(self.base_url, "static")
        )
        # try local mathjax, either in nbextensions/mathjax or static/mathjax
        for (url_prefix, search_path) in [
            (url_path_join(self.base_url, "nbextensions"), self.nbextensions_path),
            (static_url_prefix, self.static_file_path),
        ]:
            self.log.debug("searching for local mathjax in %s", search_path)
            try:
                mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), search_path)
            except IOError:
                continue
            else:
                url = url_path_join(url_prefix, u"mathjax/MathJax.js")
                self.log.info("Serving local MathJax from %s at %s", mathjax, url)
                return url
        # no local mathjax, serve from CDN
        url = u"https://cdn.mathjax.org/mathjax/latest/MathJax.js"
        self.log.info("Using MathJax from CDN: %s", url)
        return url
    def _mathjax_url_changed(self, name, old, new):
        if new and not self.enable_mathjax:
            # enable_mathjax=False overrides mathjax_url
            self.mathjax_url = u''
        else:
            self.log.info("Using MathJax: %s", new)
    contents_manager_class = Type(
        default_value=FileContentsManager,
        klass=ContentsManager,
        config=True,
        help='The notebook manager class to use.'
    )
    kernel_manager_class = Type(
        default_value=MappingKernelManager,
        config=True,
        help='The kernel manager class to use.'
    )
    session_manager_class = Type(
        default_value=SessionManager,
        config=True,
        help='The session manager class to use.'
    )
    cluster_manager_class = Type(
        default_value=ClusterManager,
        config=True,
        help='The cluster manager class to use.'
    )
    config_manager_class = Type(
        default_value=ConfigManager,
        config = True,
        help='The config manager class to use'
    )
    kernel_spec_manager = Instance(KernelSpecManager)
    kernel_spec_manager_class = Type(
        default_value=KernelSpecManager,
        config=True,
        help="""
        The kernel spec manager class to use. Should be a subclass
        of `IPython.kernel.kernelspec.KernelSpecManager`.
        The Api of KernelSpecManager is provisional and might change
        without warning between this version of IPython and the next stable one.
        """
    )
    login_handler_class = Type(
        default_value=LoginHandler,
        klass=web.RequestHandler,
        config=True,
        help='The login handler class to use.',
    )
    logout_handler_class = Type(
        default_value=LogoutHandler,
        klass=web.RequestHandler,
        config=True,
        help='The logout handler class to use.',
    )
    trust_xheaders = Bool(False, config=True,
        help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
              "sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
    )
    # Path of the JSON file describing this running server (see server_info()).
    info_file = Unicode()
    def _info_file_default(self):
        info_file = "nbserver-%s.json"%os.getpid()
        return os.path.join(self.profile_dir.security_dir, info_file)
    pylab = Unicode('disabled', config=True,
        help="""
        DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib.
        """
    )
    def _pylab_changed(self, name, old, new):
        """when --pylab is specified, display a warning and exit"""
        if new != 'warn':
            backend = ' %s' % new
        else:
            backend = ''
        self.log.error("Support for specifying --pylab on the command line has been removed.")
        self.log.error(
            "Please use `%pylab{0}` or `%matplotlib{0}` in the notebook itself.".format(backend)
        )
        self.exit(1)
    notebook_dir = Unicode(config=True,
        help="The directory to use for notebooks and kernels."
    )
    def _notebook_dir_default(self):
        # If a file was given, serve from its directory; else from the CWD.
        if self.file_to_run:
            return os.path.dirname(os.path.abspath(self.file_to_run))
        else:
            return py3compat.getcwd()
    def _notebook_dir_changed(self, name, old, new):
        """Do a bit of validation of the notebook dir."""
        if not os.path.isabs(new):
            # If we receive a non-absolute path, make it absolute.
            self.notebook_dir = os.path.abspath(new)
            return
        if not os.path.isdir(new):
            raise TraitError("No such notebook dir: %r" % new)
        # setting App.notebook_dir implies setting notebook and kernel dirs as well
        self.config.FileContentsManager.root_dir = new
        self.config.MappingKernelManager.root_dir = new
    server_extensions = List(Unicode(), config=True,
        help=("Python modules to load as notebook server extensions. "
              "This is an experimental API, and may change in future releases.")
    )
    reraise_server_extension_failures = Bool(
        False,
        config=True,
        help="Reraise exceptions encountered loading server extensions?",
    )
    def parse_command_line(self, argv=None):
        # A single positional extra arg may be a notebook file or a directory.
        super(NotebookApp, self).parse_command_line(argv)
        if self.extra_args:
            arg0 = self.extra_args[0]
            f = os.path.abspath(arg0)
            self.argv.remove(arg0)
            if not os.path.exists(f):
                self.log.critical("No such file or directory: %s", f)
                self.exit(1)
            # Use config here, to ensure that it takes higher priority than
            # anything that comes from the profile.
            c = Config()
            if os.path.isdir(f):
                c.NotebookApp.notebook_dir = f
            elif os.path.isfile(f):
                c.NotebookApp.file_to_run = f
            self.update_config(c)
    def init_kernel_argv(self):
        """add the profile-dir to arguments to be passed to IPython kernels"""
        # FIXME: remove special treatment of IPython kernels
        # Kernel should get *absolute* path to profile directory
        self.ipython_kernel_argv = ["--profile-dir", self.profile_dir.location]
    def init_configurables(self):
        # Instantiate the configured manager classes; order matters, since
        # session_manager consumes kernel_manager and contents_manager.
        self.kernel_spec_manager = self.kernel_spec_manager_class(
            parent=self,
            ipython_dir=self.ipython_dir,
        )
        self.kernel_manager = self.kernel_manager_class(
            parent=self,
            log=self.log,
            ipython_kernel_argv=self.ipython_kernel_argv,
            connection_dir=self.profile_dir.security_dir,
        )
        self.contents_manager = self.contents_manager_class(
            parent=self,
            log=self.log,
        )
        self.session_manager = self.session_manager_class(
            parent=self,
            log=self.log,
            kernel_manager=self.kernel_manager,
            contents_manager=self.contents_manager,
        )
        self.cluster_manager = self.cluster_manager_class(
            parent=self,
            log=self.log,
        )
        self.config_manager = self.config_manager_class(
            parent=self,
            log=self.log,
            profile_dir=self.profile_dir.location,
        )
    def init_logging(self):
        # This prevents double log messages because tornado use a root logger that
        # self.log is a child of. The logging module dipatches log messages to a log
        # and all of its ancenstors until propagate is set to False.
        self.log.propagate = False
        for log in app_log, access_log, gen_log:
            # consistent log output name (NotebookApp instead of tornado.access, etc.)
            log.name = self.log.name
        # hook up tornado 3's loggers to our app handlers
        logger = logging.getLogger('tornado')
        logger.propagate = True
        logger.parent = self.log
        logger.setLevel(self.log.level)
    def init_webapp(self):
        """initialize tornado webapp and httpserver"""
        self.tornado_settings['allow_origin'] = self.allow_origin
        if self.allow_origin_pat:
            self.tornado_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
        self.tornado_settings['allow_credentials'] = self.allow_credentials
        # ensure default_url starts with base_url
        if not self.default_url.startswith(self.base_url):
            self.default_url = url_path_join(self.base_url, self.default_url)
        self.web_app = NotebookWebApplication(
            self, self.kernel_manager, self.contents_manager,
            self.cluster_manager, self.session_manager, self.kernel_spec_manager,
            self.config_manager,
            self.log, self.base_url, self.default_url, self.tornado_settings,
            self.jinja_environment_options
        )
        ssl_options = self.ssl_options
        if self.certfile:
            ssl_options['certfile'] = self.certfile
        if self.keyfile:
            ssl_options['keyfile'] = self.keyfile
        if not ssl_options:
            # None indicates no SSL config
            ssl_options = None
        else:
            # Disable SSLv3, since its use is discouraged.
            ssl_options['ssl_version']=ssl.PROTOCOL_TLSv1
        self.login_handler_class.validate_security(self, ssl_options=ssl_options)
        self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
                                                 xheaders=self.trust_xheaders)
        # Try the configured port, then a series of random fallbacks.
        success = None
        for port in random_ports(self.port, self.port_retries+1):
            try:
                self.http_server.listen(port, self.ip)
            except socket.error as e:
                if e.errno == errno.EADDRINUSE:
                    self.log.info('The port %i is already in use, trying another random port.' % port)
                    continue
                elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
                    self.log.warn("Permission to listen on port %i denied" % port)
                    continue
                else:
                    raise
            else:
                self.port = port
                success = True
                break
        if not success:
            self.log.critical('ERROR: the notebook server could not be started because '
                              'no available port could be found.')
            self.exit(1)
    @property
    def display_url(self):
        # URL suitable for showing to the user in log output.
        ip = self.ip if self.ip else '[all ip addresses on your system]'
        return self._url(ip)
    @property
    def connection_url(self):
        # URL suitable for actually connecting (e.g. opening a browser).
        ip = self.ip if self.ip else 'localhost'
        return self._url(ip)
    def _url(self, ip):
        proto = 'https' if self.certfile else 'http'
        return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
    def init_terminals(self):
        # Terminal support is optional; degrade gracefully if unavailable.
        try:
            from .terminal import initialize
            initialize(self.web_app)
            self.web_app.settings['terminals_available'] = True
        except ImportError as e:
            log = self.log.debug if sys.platform == 'win32' else self.log.warn
            log("Terminals not available (error was %s)", e)
    def init_signal(self):
        # Install handlers for clean shutdown and status reporting.
        if not sys.platform.startswith('win'):
            signal.signal(signal.SIGINT, self._handle_sigint)
        signal.signal(signal.SIGTERM, self._signal_stop)
        if hasattr(signal, 'SIGUSR1'):
            # Windows doesn't support SIGUSR1
            signal.signal(signal.SIGUSR1, self._signal_info)
        if hasattr(signal, 'SIGINFO'):
            # only on BSD-based systems
            signal.signal(signal.SIGINFO, self._signal_info)
    def _handle_sigint(self, sig, frame):
        """SIGINT handler spawns confirmation dialog"""
        # register more forceful signal handler for ^C^C case
        signal.signal(signal.SIGINT, self._signal_stop)
        # request confirmation dialog in bg thread, to avoid
        # blocking the App
        thread = threading.Thread(target=self._confirm_exit)
        thread.daemon = True
        thread.start()
    def _restore_sigint_handler(self):
        """callback for restoring original SIGINT handler"""
        signal.signal(signal.SIGINT, self._handle_sigint)
    def _confirm_exit(self):
        """confirm shutdown on ^C
        A second ^C, or answering 'y' within 5s will cause shutdown,
        otherwise original SIGINT handler will be restored.
        This doesn't work on Windows.
        """
        info = self.log.info
        info('interrupted')
        print(self.notebook_info())
        sys.stdout.write("Shutdown this notebook server (y/[n])? ")
        sys.stdout.flush()
        # Wait up to 5s for a line on stdin.
        r,w,x = select.select([sys.stdin], [], [], 5)
        if r:
            line = sys.stdin.readline()
            if line.lower().startswith('y') and 'n' not in line.lower():
                self.log.critical("Shutdown confirmed")
                ioloop.IOLoop.current().stop()
                return
        else:
            print("No answer for 5s:", end=' ')
        print("resuming operation...")
        # no answer, or answer is no:
        # set it back to original SIGINT handler
        # use IOLoop.add_callback because signal.signal must be called
        # from main thread
        ioloop.IOLoop.current().add_callback(self._restore_sigint_handler)
    def _signal_stop(self, sig, frame):
        self.log.critical("received signal %s, stopping", sig)
        ioloop.IOLoop.current().stop()
    def _signal_info(self, sig, frame):
        # SIGUSR1/SIGINFO: print server status without stopping.
        print(self.notebook_info())
    def init_components(self):
        """Check the components submodule, and warn if it's unclean"""
        status = submodule.check_submodule_status()
        if status == 'missing':
            self.log.warn("components submodule missing, running `git submodule update`")
            submodule.update_submodules(submodule.ipython_parent())
        elif status == 'unclean':
            self.log.warn("components submodule unclean, you may see 404s on static/components")
            self.log.warn("run `setup.py submodule` or `git submodule update` to update")
    def init_server_extensions(self):
        """Load any extensions specified by config.
        Import the module, then call the load_jupyter_server_extension function,
        if one exists.
        The extension API is experimental, and may change in future releases.
        """
        for modulename in self.server_extensions:
            try:
                mod = importlib.import_module(modulename)
                func = getattr(mod, 'load_jupyter_server_extension', None)
                if func is not None:
                    func(self)
            except Exception:
                if self.reraise_server_extension_failures:
                    raise
                self.log.warn("Error loading server extension %s", modulename,
                              exc_info=True)
    @catch_config_error
    def initialize(self, argv=None):
        # Full startup sequence; order matters (e.g. webapp needs the
        # configurables created in init_configurables).
        super(NotebookApp, self).initialize(argv)
        self.init_logging()
        self.init_kernel_argv()
        self.init_configurables()
        self.init_components()
        self.init_webapp()
        self.init_terminals()
        self.init_signal()
        self.init_server_extensions()
    def cleanup_kernels(self):
        """Shutdown all kernels.
        The kernels will shutdown themselves when this process no longer exists,
        but explicit shutdown allows the KernelManagers to cleanup the connection files.
        """
        self.log.info('Shutting down kernels')
        self.kernel_manager.shutdown_all()
    def notebook_info(self):
        "Return the current working directory and the server url information"
        info = self.contents_manager.info_string() + "\n"
        info += "%d active kernels \n" % len(self.kernel_manager._kernels)
        return info + "The IPython Notebook is running at: %s" % self.display_url
    def server_info(self):
        """Return a JSONable dict of information about this server."""
        return {'url': self.connection_url,
                'hostname': self.ip if self.ip else 'localhost',
                'port': self.port,
                'secure': bool(self.certfile),
                'base_url': self.base_url,
                'notebook_dir': os.path.abspath(self.notebook_dir),
                'pid': os.getpid()
               }
    def write_server_info_file(self):
        """Write the result of server_info() to the JSON file info_file."""
        with open(self.info_file, 'w') as f:
            json.dump(self.server_info(), f, indent=2)
    def remove_server_info_file(self):
        """Remove the nbserver-<pid>.json file created for this server.
        Ignores the error raised when the file has already been removed.
        """
        try:
            os.unlink(self.info_file)
        except OSError as e:
            if e.errno != errno.ENOENT:
                raise
    def start(self):
        """ Start the IPython Notebook server app, after initialization
        This method takes no arguments so all configuration and initialization
        must be done prior to calling this method."""
        if self.subapp is not None:
            return self.subapp.start()
        info = self.log.info
        for line in self.notebook_info().split("\n"):
            info(line)
        info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
        self.write_server_info_file()
        if self.open_browser or self.file_to_run:
            try:
                browser = webbrowser.get(self.browser or None)
            except webbrowser.Error as e:
                self.log.warn('No web browser found: %s.' % e)
                browser = None
            if self.file_to_run:
                if not os.path.exists(self.file_to_run):
                    self.log.critical("%s does not exist" % self.file_to_run)
                    self.exit(1)
                relpath = os.path.relpath(self.file_to_run, self.notebook_dir)
                uri = url_path_join('notebooks', *relpath.split(os.sep))
            else:
                uri = 'tree'
            if browser:
                # Open the browser from a thread so startup is not blocked.
                b = lambda : browser.open(url_path_join(self.connection_url, uri),
                                          new=2)
                threading.Thread(target=b).start()
        self.io_loop = ioloop.IOLoop.current()
        if sys.platform.startswith('win'):
            # add no-op to wake every 5s
            # to handle signals that may be ignored by the inner loop
            pc = ioloop.PeriodicCallback(lambda : None, 5000)
            pc.start()
        try:
            self.io_loop.start()
        except KeyboardInterrupt:
            info("Interrupted...")
        finally:
            self.cleanup_kernels()
            self.remove_server_info_file()
    def stop(self):
        # Schedule the stop on the IO loop so it is safe to call
        # from any thread.
        def _stop():
            self.http_server.stop()
            self.io_loop.stop()
        self.io_loop.add_callback(_stop)
def list_running_servers(profile='default'):
    """Iterate over the server info files of running notebook servers.

    Given a profile name, find nbserver-* files in the security directory of
    that profile, and yield dicts of their information, each one pertaining to
    a currently running notebook server instance.
    """
    pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), name=profile)
    for file_name in os.listdir(pd.security_dir):
        if not file_name.startswith('nbserver-'):
            continue
        full_path = os.path.join(pd.security_dir, file_name)
        with io.open(full_path, encoding='utf-8') as f:
            info = json.load(f)
        # Simple check whether that process is really still running.
        # Also remove leftover files from IPython 2.x without a pid field.
        if ('pid' in info) and check_pid(info['pid']):
            yield info
        else:
            # If the process has died, try to delete its info file.
            try:
                # Bug fix: unlink the full path. os.listdir yields bare
                # basenames, so the old os.unlink(file) call resolved
                # against the CWD instead of the security dir and never
                # actually removed the stale file.
                os.unlink(full_path)
            except OSError:
                pass  # TODO: This should warn or log or something
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
# Entry point referenced by the `ipython notebook` launcher / console scripts.
launch_new_instance = NotebookApp.launch_instance
|
code.py | # Released under the MIT License. See LICENSE for details.
#
"""Functionality for formatting, linting, etc. code."""
from __future__ import annotations
import os
import subprocess
import sys
from pathlib import Path
from typing import TYPE_CHECKING
from efrotools.filecache import FileCache
if TYPE_CHECKING:
from typing import Set, List, Dict, Any, Union, Optional
def format_clang_format(projroot: Path, full: bool) -> None:
    """Run clang-format on all of our source code (multithreaded).

    A file-hash cache is used so only files that changed since the last
    run get re-formatted; passing full=True wipes the cache first.
    """
    import time
    import concurrent.futures
    from multiprocessing import cpu_count
    from efrotools import get_files_hash
    os.chdir(projroot)
    cachepath = Path(projroot, '.cache/format_clang_format')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    cfconfig = Path(projroot, '.clang-format')
    filenames = get_code_filenames(projroot)
    # Changing the .clang-format config invalidates all cached entries.
    confighash = get_files_hash([cfconfig])
    cache.update(filenames, confighash)
    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> Dict[str, Any]:
        start_time = time.time()
        # Note: seems os.system does not unlock the gil;
        # make sure to use subprocess.
        result = subprocess.call(['clang-format', '-i', filename])
        if result != 0:
            # Bug fix: the f-string here had lost its placeholder and
            # printed a literal '(unknown)'; report the actual file.
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()
        return {'f': filename, 't': duration}

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(format_file, dirtyfiles))
    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
    cache.mark_clean(filenames)
    cache.write()
    print(f'Formatting is up to date for {len(filenames)} code files.',
          flush=True)
def check_cpplint(projroot: Path, full: bool) -> None:
    """Run cpplint on all our applicable code.

    Uses a file-hash cache so only changed files are re-checked;
    passing full=True wipes the cache first.
    """
    # pylint: disable=too-many-locals
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import getconfig, PYVER
    from efro.terminal import Clr
    from efro.error import CleanError
    os.chdir(projroot)
    filenames = get_code_filenames(projroot)
    for fpath in filenames:
        if ' ' in fpath:
            raise Exception(f'Found space in path {fpath}; unexpected.')
    # Check the config for a list of ones to ignore.
    code_blacklist: List[str] = getconfig(projroot).get(
        'cpplint_blacklist', [])
    # Just pretend blacklisted ones don't exist.
    filenames = [f for f in filenames if f not in code_blacklist]
    filenames = [f for f in filenames if not f.endswith('.mm')]
    cachepath = Path(projroot, '.cache/check_cpplint')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, '')
    dirtyfiles = cache.get_dirty_files()
    if dirtyfiles:
        print(f'{Clr.BLU}CppLint checking'
              f' {len(dirtyfiles)} file(s)...{Clr.RST}')
    disabled_filters: List[str] = [
        'build/include_what_you_use',
        'build/c++11',
        'readability/nolint',
        'legal/copyright',
    ]
    filterstr = ','.join(f'-{x}' for x in disabled_filters)

    def lint_file(filename: str) -> None:
        result = subprocess.call([
            f'python{PYVER}', '-m', 'cpplint', '--root=src',
            f'--filter={filterstr}', filename
        ])
        if result != 0:
            # Bug fix: the f-string here had lost its placeholder and
            # printed a literal '(unknown)'; report the actual file.
            raise CleanError(
                f'{Clr.RED}Cpplint failed for {filename}.{Clr.RST}')

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Converting this to a list will propagate any errors.
        list(executor.map(lint_file, dirtyfiles))
    if dirtyfiles:
        cache.mark_clean(filenames)
        cache.write()
    print(
        f'{Clr.GRN}CppLint: all {len(filenames)} files are passing.{Clr.RST}',
        flush=True)
def get_code_filenames(projroot: Path) -> List[str]:
    """Return the list of files to lint-check or auto-formatting."""
    from efrotools import getconfig
    extensions = ('.h', '.c', '.cc', '.cpp', '.cxx', '.m', '.mm')
    src_dirs = getconfig(projroot).get('code_source_dirs', None)
    if src_dirs is None:
        raise RuntimeError('code_source_dirs not declared in config')
    # str.endswith accepts a tuple, so one call covers all extensions.
    found = [
        os.path.join(walkroot, name)
        for src_dir in src_dirs
        for walkroot, _dirs, names in os.walk(src_dir)
        for name in names
        if name.endswith(extensions)
    ]
    return sorted(found)
def format_yapf(projroot: Path, full: bool) -> None:
    """Runs yapf on all of our Python code.

    A file-hash cache is used so only files that changed since the last
    run get re-formatted; passing full=True wipes the cache first.
    """
    import time
    from concurrent.futures import ThreadPoolExecutor
    from multiprocessing import cpu_count
    from efrotools import get_files_hash, PYVER
    os.chdir(projroot)
    cachepath = Path(projroot, '.cache/format_yapf')
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    yapfconfig = Path(projroot, '.style.yapf')
    filenames = get_script_filenames(projroot)
    # Changing the yapf style config invalidates all cached entries.
    confighash = get_files_hash([yapfconfig])
    cache.update(filenames, confighash)
    dirtyfiles = cache.get_dirty_files()

    def format_file(filename: str) -> None:
        start_time = time.time()
        result = subprocess.call(
            [f'python{PYVER}', '-m', 'yapf', '--in-place', filename])
        if result != 0:
            # Bug fix: the f-string here had lost its placeholder and
            # printed a literal '(unknown)'; report the actual file.
            raise Exception(f'Formatting failed for {filename}')
        duration = time.time() - start_time
        print(f'Formatted {filename} in {duration:.2f} seconds.')
        sys.stdout.flush()

    with ThreadPoolExecutor(max_workers=cpu_count()) as executor:
        # Convert the futures to a list to propagate any errors even
        # though there are no return values we use.
        list(executor.map(format_file, dirtyfiles))
    if dirtyfiles:
        # Since we changed files, need to update hashes again.
        cache.update(filenames, confighash)
    cache.mark_clean(filenames)
    cache.write()
    print(f'Formatting is up to date for {len(filenames)} script files.',
          flush=True)
def format_yapf_str(projroot: Path, code: str) -> str:
    """Run yapf formatting on the provided inline code."""
    from efrotools import PYVER
    # Feed the code through yapf via stdin and capture the formatted result.
    cmd = [f'python{PYVER}', '-m', 'yapf']
    proc = subprocess.run(cmd,
                          capture_output=True,
                          check=True,
                          input=code.encode(),
                          cwd=projroot)
    return proc.stdout.decode()
def _should_include_script(fnamefull: str) -> bool:
    """Return whether a file should be treated as a Python script."""
    basename = os.path.basename(fnamefull)
    # Anything with a .py extension always qualifies.
    if basename.endswith('.py'):
        return True
    # Look for 'binary' scripts with no extensions too: non-hidden,
    # extensionless files whose first line is a python shebang.
    if basename.startswith('.') or '.' in basename:
        return False
    try:
        with open(fnamefull, encoding='utf-8') as infile:
            firstline = infile.readline()
    except UnicodeDecodeError:
        # Actual binary files will probably kick back this error.
        return False
    return ('/usr/bin/env python' in firstline
            or '/usr/bin/python' in firstline)
def get_script_filenames(projroot: Path) -> List[str]:
    """Return the Python filenames to lint-check or auto-format."""
    from efrotools import getconfig
    src_dirs = getconfig(projroot).get('python_source_dirs', None)
    if src_dirs is None:
        raise RuntimeError('python_source_dirs not declared in config')
    found: Set[str] = set()
    for src_dir in src_dirs:
        for walkroot, _dirs, names in os.walk(src_dir):
            for name in names:
                fullpath = os.path.join(walkroot, name)
                # Skip symlinks (we conceivably operate on the original too)
                if os.path.islink(fullpath):
                    continue
                if _should_include_script(fullpath):
                    found.add(fullpath)
    # Emacs flycheck temp files sneak into source dirs; exclude them.
    return sorted(f for f in found if 'flycheck_' not in f)
def runpylint(projroot: Path, filenames: List[str]) -> None:
    """Run Pylint explicitly on files."""
    rcfile = Path(projroot, '.pylintrc')
    if not os.path.isfile(rcfile):
        raise Exception('pylintrc not found where expected')
    # Technically we could just run pylint standalone via command line here,
    # but let's go ahead and run it inline so we're consistent with our cached
    # full-project version.
    _run_pylint(
        projroot,
        rcfile,
        cache=None,
        dirtyfiles=filenames,
        allfiles=None,
    )
def pylint(projroot: Path, full: bool, fast: bool) -> None:
    """Run Pylint on all scripts in our project (with smart dep tracking)."""
    from efrotools import get_files_hash
    from efro.terminal import Clr
    pylintrc = Path(projroot, '.pylintrc')
    if not os.path.isfile(pylintrc):
        raise Exception('pylintrc not found where expected')
    filenames = get_script_filenames(projroot)
    if any(' ' in name for name in filenames):
        raise Exception('found space in path; unexpected')
    # Placeholder for files to exclude from linting (currently empty).
    script_blacklist: List[str] = []
    filenames = [f for f in filenames if f not in script_blacklist]
    # 'fast' mode uses a separate cache so the two modes don't clobber
    # each other's dirty-state bookkeeping.
    cachebasename = 'check_pylint_fast' if fast else 'check_pylint'
    cachepath = Path(projroot, '.cache', cachebasename)
    if full and cachepath.exists():
        cachepath.unlink()
    cache = FileCache(cachepath)
    # Clear out entries and hashes for files that have changed/etc.
    cache.update(filenames, get_files_hash([pylintrc]))
    # Do a recursive dependency check and mark all files who are
    # either dirty or have a dependency that is dirty.
    filestates: Dict[str, bool] = {}
    for fname in filenames:
        _dirty_dep_check(fname, filestates, cache, fast, 0)
    dirtyfiles = [k for k, v in filestates.items() if v]
    # Let's sort by modification time, so ones we're actively trying
    # to fix get linted first and we see remaining errors faster.
    dirtyfiles.sort(reverse=True, key=lambda f: os.stat(f).st_mtime)
    if dirtyfiles:
        print(
            f'{Clr.BLU}Pylint checking {len(dirtyfiles)} file(s)...{Clr.RST}',
            flush=True)
        try:
            _run_pylint(projroot, pylintrc, cache, dirtyfiles, filenames)
        finally:
            # No matter what happens, we still want to
            # update our disk cache (since some lints may have passed).
            cache.write()
    print(f'{Clr.GRN}Pylint: all {len(filenames)} files are passing.{Clr.RST}',
          flush=True)
    # NOTE(review): this second write is redundant when dirtyfiles was
    # non-empty (the finally above already wrote) but is what persists the
    # cache on the no-dirty-files path.
    cache.write()
def _dirty_dep_check(fname: str, filestates: Dict[str, bool], cache: FileCache,
                     fast: bool, recursion: int) -> bool:
    """Recursively check a file's deps and return whether it is dirty.

    In non-fast mode the dependency graph is walked fully (with cycle
    protection via filestates); in fast mode only one level of deps is
    examined, bounded by the recursion counter.
    """
    # pylint: disable=too-many-branches
    if not fast:
        # Check for existing dirty state (only applies in non-fast where
        # we recurse infinitely).
        curstate = filestates.get(fname)
        if curstate is not None:
            return curstate
    # Ok; there's no current state for this file.
    # First lets immediately mark it as clean so if a dependency of ours
    # queries it we won't loop infinitely. (If we're actually dirty that
    # will be reflected properly once we're done).
    if not fast:
        filestates[fname] = False
    # If this dependency has disappeared, consider that dirty.
    if fname not in cache.entries:
        dirty = True
    else:
        cacheentry = cache.entries[fname]
        # See if we ourself are dirty ('hash' is removed on failure/change).
        if 'hash' not in cacheentry:
            dirty = True
        else:
            # Ok we're clean; now check our dependencies..
            dirty = False
            # Only increment recursion in fast mode, and
            # skip dependencies if we're past the recursion limit.
            recursion2 = recursion
            if fast:
                # Our one exception is top level ba which basically aggregates.
                if not fname.endswith('/ba/__init__.py'):
                    recursion2 += 1
            if recursion2 <= 1:
                deps = cacheentry.get('deps', [])
                for dep in deps:
                    # If we have a dep that no longer exists, WE are dirty.
                    if not os.path.exists(dep):
                        dirty = True
                        break
                    if _dirty_dep_check(dep, filestates, cache, fast,
                                        recursion2):
                        dirty = True
                        break
    # Cache and return our dirty state..
    # Note: for fast mode we limit to recursion==0 so we only write when
    # the file itself is being directly visited.
    if recursion == 0:
        filestates[fname] = dirty
    return dirty
def _run_pylint(projroot: Path, pylintrc: Union[Path, str],
                cache: Optional[FileCache], dirtyfiles: List[str],
                allfiles: Optional[List[str]]) -> Dict[str, Any]:
    """Run pylint in-process on dirtyfiles and report the outcome.

    When a cache is provided, per-file pass/fail state is folded back into
    it via _apply_pylint_run_to_cache. Raises CleanError on lint failures.
    Returns a dict with the files checked ('f') and elapsed seconds ('t').
    """
    import time
    from pylint import lint
    from efro.error import CleanError
    from efro.terminal import Clr
    start_time = time.time()
    args = ['--rcfile', str(pylintrc), '--output-format=colorized']
    args += dirtyfiles
    name = f'{len(dirtyfiles)} file(s)'
    # do_exit=False keeps pylint from calling sys.exit() so we can
    # inspect the run object afterwards.
    run = lint.Run(args, do_exit=False)
    if cache is not None:
        assert allfiles is not None
        result = _apply_pylint_run_to_cache(projroot, run, dirtyfiles,
                                            allfiles, cache)
        if result != 0:
            raise CleanError(f'Pylint failed for {result} file(s).')
        # Sanity check: when the linter fails we should always be failing too.
        # If not, it means we're probably missing something and incorrectly
        # marking a failed file as clean.
        if run.linter.msg_status != 0 and result == 0:
            raise RuntimeError('Pylint linter returned non-zero result'
                               ' but we did not; this is probably a bug.')
    else:
        if run.linter.msg_status != 0:
            raise CleanError('Pylint failed.')
    duration = time.time() - start_time
    print(f'{Clr.GRN}Pylint passed for {name}'
          f' in {duration:.1f} seconds.{Clr.RST}')
    sys.stdout.flush()
    return {'f': dirtyfiles, 't': duration}
def _apply_pylint_run_to_cache(projroot: Path, run: Any, dirtyfiles: List[str],
                               allfiles: List[str], cache: FileCache) -> int:
    """Fold a completed pylint run's results into the file cache.

    Records per-file dependency lists from pylint's import stats and marks
    each dirty file clean (storing its hash) or failed (dropping its hash).
    Returns the number of files with errors.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    from astroid import modutils
    from efrotools import getconfig
    from efro.error import CleanError
    # First off, build a map of dirtyfiles to module names
    # (and the corresponding reverse map).
    paths_to_names: Dict[str, str] = {}
    names_to_paths: Dict[str, str] = {}
    for fname in allfiles:
        try:
            mpath = modutils.modpath_from_file(fname)
            mpath = _filter_module_name('.'.join(mpath))
            paths_to_names[fname] = mpath
        except ImportError:
            # This probably means its a tool or something not in our
            # standard path. In this case just use its base name.
            # (seems to be what pylint does)
            dummyname = os.path.splitext(os.path.basename(fname))[0]
            paths_to_names[fname] = dummyname
    for key, val in paths_to_names.items():
        names_to_paths[val] = key
    # If there's any cyclic-import errors, just mark all deps as dirty;
    # don't want to add the logic to figure out which ones the cycles cover
    # since they all seems to appear as errors for the last file in the list.
    cycles: int = run.linter.stats.get('by_msg', {}).get('cyclic-import', 0)
    have_dep_cycles: bool = cycles > 0
    if have_dep_cycles:
        print(f'Found {cycles} cycle-errors; keeping all dirty files dirty.')
    # Update dependencies for what we just ran.
    # A run leaves us with a map of modules to a list of the modules that
    # imports them. We want the opposite though: for each of our modules
    # we want a list of the modules it imports.
    reversedeps = {}
    # Make sure these are all proper module names; no foo.bar.__init__ stuff.
    for key, val in run.linter.stats['dependencies'].items():
        sval = [_filter_module_name(m) for m in val]
        reversedeps[_filter_module_name(key)] = sval
    deps: Dict[str, Set[str]] = {}
    untracked_deps = set()
    for mname, mallimportedby in reversedeps.items():
        for mimportedby in mallimportedby:
            if mname in names_to_paths:
                deps.setdefault(mimportedby, set()).add(mname)
            else:
                untracked_deps.add(mname)
    ignored_untracked_deps: List[str] = getconfig(projroot).get(
        'pylint_ignored_untracked_deps', [])
    # Add a few that this package itself triggers.
    ignored_untracked_deps += ['pylint.lint', 'astroid.modutils', 'astroid']
    # Ignore some specific untracked deps; complain about any others.
    untracked_deps = set(dep for dep in untracked_deps
                         if dep not in ignored_untracked_deps
                         and not dep.startswith('bametainternal'))
    if untracked_deps:
        raise CleanError(
            f'Pylint found untracked dependencies: {untracked_deps}.'
            ' If these are external to your project, add them to'
            ' "pylint_ignored_untracked_deps" in the project config.')
    # Finally add the dependency lists to our entries (operate on
    # everything in the run; it may not be mentioned in deps).
    no_deps_modules = set()
    for fname in dirtyfiles:
        fmod = paths_to_names[fname]
        if fmod not in deps:
            # Since this code is a bit flaky, lets always announce when we
            # come up empty and keep a whitelist of expected values to ignore.
            no_deps_modules.add(fmod)
            depsval: List[str] = []
        else:
            # Our deps here are module names; store paths.
            depsval = [names_to_paths[dep] for dep in deps[fmod]]
        cache.entries[fname]['deps'] = depsval
    # Let's print a list of modules with no detected deps so we can make sure
    # this is behaving.
    if no_deps_modules:
        # Debug output; disabled by default.
        if bool(False):
            print('NOTE: no dependencies found for:',
                  ', '.join(no_deps_modules))
    # Ok, now go through all dirtyfiles involved in this run.
    # Mark them as either errored or clean depending on whether there's
    # error info for them in the run stats.
    # Once again need to convert any foo.bar.__init__ to foo.bar.
    stats_by_module: Dict[str, Any] = {
        _filter_module_name(key): val
        for key, val in run.linter.stats['by_module'].items()
    }
    errcount = 0
    for fname in dirtyfiles:
        mname2 = paths_to_names.get(fname)
        if mname2 is None:
            raise Exception('unable to get module name for "' + fname + '"')
        counts = stats_by_module.get(mname2)
        # 'statement' count seems to be new and always non-zero; ignore it
        if counts is not None:
            counts = {c: v for c, v in counts.items() if c != 'statement'}
        if (counts is not None and any(counts.values())) or have_dep_cycles:
            # print('GOT FAIL FOR', fname, counts)
            # Dropping the hash marks this file dirty for the next run.
            if 'hash' in cache.entries[fname]:
                del cache.entries[fname]['hash']
            errcount += 1
        else:
            # print('MARKING FILE CLEAN', mname2, fname)
            cache.entries[fname]['hash'] = (cache.curhashes[fname])
    return errcount
def _filter_module_name(mpath: str) -> str:
"""Filter weird module paths such as 'foo.bar.__init__' to 'foo.bar'."""
# Seems Pylint returns module paths with __init__ on the end in some cases
# and not in others. Could dig into it, but for now just filtering them
# out...
return mpath[:-9] if mpath.endswith('.__init__') else mpath
def runmypy(projroot: Path,
            filenames: List[str],
            full: bool = False,
            check: bool = True) -> None:
    """Run MyPy on provided filenames."""
    from efrotools import PYTHON_BIN
    cmd = [
        PYTHON_BIN, '-m', 'mypy', '--pretty', '--no-error-summary',
        '--config-file',
        str(Path(projroot, '.mypy.ini'))
    ]
    # A full run disables mypy's incremental cache.
    if full:
        cmd.insert(cmd.index('mypy') + 1, '--no-incremental')
    cmd += filenames
    subprocess.run(cmd, check=check)
def mypy(projroot: Path, full: bool) -> None:
    """Type check all of our scripts using mypy."""
    import time
    from efro.terminal import Clr
    from efro.error import CleanError
    filenames = get_script_filenames(projroot)
    modedesc = '(full)' if full else '(incremental)'
    print(f'{Clr.BLU}Running Mypy {modedesc}...{Clr.RST}', flush=True)
    begintime = time.time()
    try:
        runmypy(projroot, filenames, full)
    except Exception as exc:
        raise CleanError('Mypy failed.') from exc
    elapsed = time.time() - begintime
    print(f'{Clr.GRN}Mypy passed in {elapsed:.1f} seconds.{Clr.RST}',
          flush=True)
def dmypy(projroot: Path) -> None:
    """Type check all of our scripts using mypy in daemon mode."""
    import time
    from efro.terminal import Clr
    from efro.error import CleanError
    filenames = get_script_filenames(projroot)
    # Special case; explicitly kill the daemon.
    if '-stop' in sys.argv:
        subprocess.run(['dmypy', 'stop'], check=False)
        return
    print('Running Mypy (daemon)...', flush=True)
    begintime = time.time()
    try:
        cmd = ['dmypy', 'run', '--timeout', '3600', '--', '--config-file',
               '.mypy.ini', '--pretty']
        subprocess.run(cmd + filenames, check=True)
    except Exception as exc:
        raise CleanError('Mypy daemon: fail.') from exc
    elapsed = time.time() - begintime
    print(f'{Clr.GRN}Mypy daemon passed in {elapsed:.1f} seconds.{Clr.RST}',
          flush=True)
def _parse_idea_results(path: Path) -> int:
"""Print errors found in an idea inspection xml file.
Returns the number of errors found.
"""
import xml.etree.ElementTree as Et
error_count = 0
root = Et.parse(str(path)).getroot()
for child in root:
line: Optional[str] = None
description: Optional[str] = None
fname: Optional[str] = None
if child.tag == 'problem':
is_error = True
for pchild in child:
if pchild.tag == 'problem_class':
# We still report typos but we don't fail the
# check due to them (that just gets tedious).
if pchild.text == 'Typo':
is_error = False
if pchild.tag == 'line':
line = pchild.text
if pchild.tag == 'description':
description = pchild.text
if pchild.tag == 'file':
fname = pchild.text
if isinstance(fname, str):
fname = fname.replace('file://$PROJECT_DIR$/', '')
print(f'{fname}:{line}: {description}')
if is_error:
error_count += 1
return error_count
def _run_idea_inspections(projroot: Path,
                          scripts: List[str],
                          displayname: str,
                          inspect: Path,
                          verbose: bool,
                          inspectdir: Optional[Path] = None) -> None:
    """Actually run idea inspections.

    Runs the IDE's command-line inspector against the project, dumps its
    results into a temp dir, and parses them. Throws an Exception if
    anything is found or goes wrong.
    """
    # pylint: disable=too-many-locals
    # pylint: disable=consider-using-with
    import tempfile
    import time
    import datetime
    from efro.error import CleanError
    from efro.terminal import Clr
    start_time = time.time()
    print(
        f'{Clr.BLU}{displayname} checking'
        f' {len(scripts)} file(s)...{Clr.RST}',
        flush=True)
    tmpdir = tempfile.TemporaryDirectory()
    iprof = Path(projroot, '.idea/inspectionProfiles/Default.xml')
    if not iprof.exists():
        iprof = Path(projroot, '.idea/inspectionProfiles/Project_Default.xml')
        if not iprof.exists():
            raise Exception('No default inspection profile found.')
    cmd = [str(inspect), str(projroot), str(iprof), tmpdir.name, '-v2']
    if inspectdir is not None:
        cmd += ['-d', str(inspectdir)]
    running = True

    def heartbeat() -> None:
        """Print the time occasionally to make the log more informative."""
        while running:
            time.sleep(60)
            print('Heartbeat', datetime.datetime.now(), flush=True)

    if verbose:
        import threading
        print(cmd, flush=True)
        threading.Thread(target=heartbeat, daemon=True).start()
    result = subprocess.run(cmd, capture_output=not verbose, check=False)
    running = False
    if result.returncode != 0:
        # In verbose mode this stuff got printed already.
        if not verbose:
            stdout = (result.stdout.decode() if isinstance(
                result.stdout, bytes) else str(result.stdout))
            # Bug fix: this previously decoded/printed result.stdout here
            # too, so stderr from a failed inspection was never shown.
            stderr = (result.stderr.decode() if isinstance(
                result.stderr, bytes) else str(result.stderr))
            print(f'{displayname} inspection failure stdout:\n{stdout}' +
                  f'{displayname} inspection failure stderr:\n{stderr}')
        raise RuntimeError(f'{displayname} inspection failed.')
    files = [f for f in os.listdir(tmpdir.name) if not f.startswith('.')]
    total_errors = 0
    if files:
        for fname in files:
            total_errors += _parse_idea_results(Path(tmpdir.name, fname))
    if total_errors > 0:
        raise CleanError(f'{Clr.SRED}{displayname} inspection'
                         f' found {total_errors} error(s).{Clr.RST}')
    duration = time.time() - start_time
    print(
        f'{Clr.GRN}{displayname} passed for {len(scripts)} files'
        f' in {duration:.1f} seconds.{Clr.RST}',
        flush=True)
def _run_idea_inspections_cached(cachepath: Path,
                                 filenames: List[str],
                                 full: bool,
                                 projroot: Path,
                                 displayname: str,
                                 inspect: Path,
                                 verbose: bool,
                                 inspectdir: Optional[Path] = None) -> None:
    """Run idea inspections only when relevant inputs have changed.

    A single md5 over the contents of all scripts (plus a few .idea config
    files) is compared against the hash stored at cachepath; inspections
    run only when it differs or when 'full' is requested.
    """
    # pylint: disable=too-many-locals
    import hashlib
    import json
    from efro.terminal import Clr
    md5 = hashlib.md5()
    # Let's calc a single hash from the contents of all script files and only
    # run checks when that changes. Sadly there's not much else optimization
    # wise that we can easily do, but this will at least prevent re-checks when
    # nothing at all has changed.
    for filename in filenames:
        with open(filename, 'rb') as infile:
            md5.update(infile.read())
    # Also hash a few .idea files so we re-run inspections when they change.
    extra_hash_paths = [
        Path(projroot, '.idea/inspectionProfiles/Default.xml'),
        Path(projroot, '.idea/inspectionProfiles/Project_Default.xml'),
        Path(projroot, '.idea/dictionaries/ericf.xml')
    ]
    for epath in extra_hash_paths:
        if os.path.exists(epath):
            with open(epath, 'rb') as infile:
                md5.update(infile.read())
    current_hash = md5.hexdigest()
    existing_hash: Optional[str]
    try:
        with open(cachepath, encoding='utf-8') as infile2:
            existing_hash = json.loads(infile2.read())['hash']
    except Exception:
        # Missing or corrupt cache file just means we (re)run inspections.
        existing_hash = None
    if full or current_hash != existing_hash:
        _run_idea_inspections(projroot,
                              filenames,
                              displayname,
                              inspect=inspect,
                              verbose=verbose,
                              inspectdir=inspectdir)
        # Only record the new hash after a fully successful run.
        cachepath.parent.mkdir(parents=True, exist_ok=True)
        with open(cachepath, 'w', encoding='utf-8') as outfile:
            outfile.write(json.dumps({'hash': current_hash}))
    print(
        f'{Clr.GRN}{displayname}: all {len(filenames)}'
        f' files are passing.{Clr.RST}',
        flush=True)
def check_pycharm(projroot: Path, full: bool, verbose: bool) -> None:
    """Run pycharm inspections on all our scripts."""
    import time
    # FIXME: Generalize this to work with at least linux, possibly windows.
    cachepath = Path('.cache/check_pycharm')
    filenames = get_script_filenames(projroot)
    pycharmroot = Path('/Applications/PyCharm CE.app')
    pycharmbin = Path(pycharmroot, 'Contents/MacOS/pycharm')
    inspect = Path(pycharmroot, 'Contents/bin/inspect.sh')
    # In full mode, clear out pycharm's caches first.
    # It seems we need to spin up the GUI and give it a bit to
    # re-cache system python for this to work...
    # UPDATE: This really slows things down, so we now only do it in
    # very specific cases where time isn't important.
    # (such as our daily full-test-runs)
    # UPDATE 2: Looks like we might no longer need to do the GUI spin-up bit.
    # If we can be certain of this, we can go back to simply blowing away
    # the cache for 'full' mode checks without the env var.
    if full and os.environ.get('EFROTOOLS_FULL_PYCHARM_RECACHE') == '1':
        print('Clearing PyCharm caches...', flush=True)
        subprocess.run('rm -rf ~/Library/Caches/JetBrains/PyCharmCE*',
                       shell=True,
                       check=True)
        # Hoping this isn't necessary anymore. Need to rework this if it is,
        # since it now gets run through ssh and gui stuff doesn't seem to
        # work that way.
        if bool(False):
            print('Launching GUI PyCharm to rebuild caches...', flush=True)
            with subprocess.Popen(str(pycharmbin)) as process:
                # Wait a bit and ask it nicely to die.
                # We need to make sure it has enough time to do its
                # cache updating thing even if the system is fully under load.
                time.sleep(5 * 60)
                # Seems killing it via applescript is more likely to leave it
                # in a working state for offline inspections than TERM signal..
                subprocess.run(
                    "osascript -e 'tell application \"PyCharm CE\" to quit'",
                    shell=True,
                    check=False)
                print('Waiting for GUI PyCharm to quit...', flush=True)
                process.wait()
    _run_idea_inspections_cached(cachepath=cachepath,
                                 filenames=filenames,
                                 full=full,
                                 projroot=projroot,
                                 displayname='PyCharm',
                                 inspect=inspect,
                                 verbose=verbose)
def check_clioncode(projroot: Path, full: bool, verbose: bool) -> None:
    """Run clion inspections on all our code."""
    import time
    cachepath = Path('.cache/check_clioncode')
    filenames = get_code_filenames(projroot)
    clionroot = Path('/Applications/CLion.app')
    # clionbin = Path(clionroot, 'Contents/MacOS/clion')
    inspect = Path(clionroot, 'Contents/bin/inspect.sh')
    # At the moment offline clion inspections seem a bit flaky.
    # They don't seem to run at all if we haven't opened the project
    # in the GUI, and it seems recent changes can get ignored for that
    # reason too.
    # So for now let's try blowing away caches, launching the gui
    # temporarily, and then kicking off inspections after that. Sigh.
    print('Clearing CLion caches...', flush=True)
    caches_root = os.environ['HOME'] + '/Library/Caches/JetBrains'
    if not os.path.exists(caches_root):
        raise RuntimeError(f'CLion caches root not found: {caches_root}')
    subprocess.run('rm -rf ~/Library/Caches/JetBrains/CLion*',
                   shell=True,
                   check=True)
    # UPDATE: seems this is unnecessary now; should double check.
    # Note: I'm assuming this project needs to be open when the GUI
    # comes up. Currently just have one project so can rely on auto-open
    # but may need to get fancier later if that changes.
    if bool(True):
        print('Launching GUI CLion to rebuild caches...', flush=True)
        # process = subprocess.Popen(str(clionbin))
        subprocess.run(
            ['open', '-a', clionroot,
             Path(projroot, 'ballisticacore-cmake')],
            check=True)
        # Wait a moment and ask it nicely to die.
        waittime = 60
        while waittime > 0:
            print(f'Waiting for {waittime} more seconds.', flush=True)
            time.sleep(10)
            waittime -= 10
        # For some reason this is giving a return-code 1 although
        # it appears to be working.
        print('Waiting for GUI CLion to quit...', flush=True)
        subprocess.run(
            [
                'osascript', '-e', 'tell application "CLion" to quit\n'
                'repeat until application "CLion" is not running\n'
                '  delay 1\n'
                'end repeat'
            ],
            check=False,
        )
        time.sleep(5)
        # process.terminate()
        # process.wait(timeout=60)
    print('Launching Offline CLion to run inspections...', flush=True)
    _run_idea_inspections_cached(cachepath=cachepath,
                                 filenames=filenames,
                                 full=full,
                                 projroot=Path(projroot,
                                               'ballisticacore-cmake'),
                                 inspectdir=Path(projroot, 'src/ballistica'),
                                 displayname='CLion',
                                 inspect=inspect,
                                 verbose=verbose)
def check_android_studio(projroot: Path, full: bool, verbose: bool) -> None:
    """Run Android Studio inspections on all our code."""
    # import time
    cachepath = Path('.cache/check_android_studio')
    filenames = get_code_filenames(projroot)
    clionroot = Path('/Applications/Android Studio.app')
    # clionbin = Path(clionroot, 'Contents/MacOS/studio')
    inspect = Path(clionroot, 'Contents/bin/inspect.sh')
    # At the moment offline clion inspections seem a bit flaky.
    # They don't seem to run at all if we haven't opened the project
    # in the GUI, and it seems recent changes can get ignored for that
    # reason too.
    # So for now let's try blowing away caches, launching the gui
    # temporarily, and then kicking off inspections after that. Sigh.
    # (The GUI spin-up dance below is currently disabled for Android
    # Studio; kept here for reference in case it becomes needed again.)
    # print('Clearing Android Studio caches...', flush=True)
    # subprocess.run('rm -rf ~/Library/Caches/AndroidStudio*',
    #                shell=True,
    #                check=True)
    # Note: I'm assuming this project needs to be open when the GUI
    # comes up. Currently just have one project so can rely on auto-open
    # but may need to get fancier later if that changes.
    # print('Launching GUI CLion to rebuild caches...', flush=True)
    # process = subprocess.Popen(str(clionbin))
    # Wait a moment and ask it nicely to die.
    # time.sleep(120)
    # Seems killing it via applescript is more likely to leave it
    # in a working state for offline inspections than TERM signal..
    # subprocess.run(
    #     "osascript -e 'tell application \"Android Studio\" to quit'",
    #     shell=True)
    # process.terminate()
    # print('Waiting for GUI CLion to quit...', flush=True)
    # process.wait(timeout=60)
    print('Launching Offline Android Studio to run inspections...', flush=True)
    _run_idea_inspections_cached(
        cachepath=cachepath,
        filenames=filenames,
        full=full,
        projroot=Path(projroot, 'ballisticacore-android'),
        inspectdir=Path(
            projroot,
            'ballisticacore-android/BallisticaCore/src/main/cpp/src/ballistica'
        ),
        # inspectdir=None,
        displayname='Android Studio',
        inspect=inspect,
        verbose=verbose)
|
messenger.py | from multiprocessing import Queue
from threading import Thread
class MessengerPostbox:
    """Convenience sender bound to a specific messenger and source."""

    def __init__(self, messenger, source):
        """Initialize a message feeder bound to a messenger and source."""
        self.messenger = messenger
        self.source = source

    def send(self, key, *args):
        """Forward an event to the bound messenger, tagged with our source."""
        target = self.messenger
        target.send(self.source, key, args)
class MessengerListener:
    """Base interface for objects receiving messenger events."""

    def handleMessage(self, source, event, args):
        """Handle one dispatched message (default implementation: ignore)."""
class Messenger:
    """Threaded event dispatcher.

    Events are pushed onto a Queue from any thread via send() and
    delivered to all registered listeners from a single worker thread.
    """
    def __init__(self):
        """
        Initialize the messenger and start its dispatch thread.
        """
        self.isActive = True
        self.queue = Queue()
        self.thread = Thread(target=self._messengerThreadMain)
        self.listeners = []
        self.start()
    def start(self):
        """
        Start the dispatch thread.
        """
        self.thread.start()
    def stop(self):
        """
        Stop messenger thread
        """
        self.isActive = False
        # Enqueue a dummy message so the blocking queue.get() in the
        # worker wakes up and notices isActive is now False.
        self.send(None, None, None)
        self.thread.join()
    def getPostbox(self, source):
        """
        Get a new postbox bound to this messenger and the given source.
        """
        return MessengerPostbox(self, source)
    def addListener(self, listener):
        """
        Register a listener to receive all future messages.
        """
        self.listeners.append(listener)
    def removeListener(self, listener):
        """
        Unregister a listener; ignored if it was never registered.
        """
        if not listener in self.listeners:
            return
        self.listeners.remove(listener)
    def _messengerThreadMain(self):
        """
        Wait for messages in the queue
        """
        while True:
            message = self.queue.get()
            # Messages still queued at shutdown are intentionally dropped.
            if not self.isActive:
                return
            self.handle(message[0], message[1], message[2])
    def send(self, source, event, args):
        """
        Enqueue event
        """
        self.queue.put((source, event, args))
    def handle(self, source, event, args):
        """
        Handle an event by dispatching it to every registered listener.
        """
        for listener in self.listeners:
            listener.handleMessage(source, event, args)
|
performancetestreceiver.py | import threading
from time import sleep
from virtualisation.misc.log import Log
from virtualisation.triplestore.threadedtriplestoreadapter import ThreadedTriplestoreAdapter
from virtualisation.wrapper.wrapperoutputreceiver import AbstractReceiver
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
class AbstractPerformanceMeter(AbstractReceiver):
    """Receiver that periodically logs how many observations arrived.

    A background thread wakes every `delay` seconds, logs the count of
    observations received since the last wakeup, then resets the counter.
    """
    def __init__(self, delay, txt):
        self.counter = 0
        # Bug fix: this flag was previously stored as 'self.stop', which
        # shadowed the stop() method below and made it uncallable
        # (TypeError: 'bool' object is not callable).
        self._stop_requested = False
        self.delay = delay
        self.txt = txt
        threading.Thread(target=self.__run).start()

    def __run(self):
        # Report and reset the counter each interval until stop() is called.
        while not self._stop_requested:
            sleep(self.delay)
            Log.i(self.counter, self.txt)
            # print self.counter, self.txt
            # print "ThreadedTriplestoreAdapter Buffer Size:", ThreadedTriplestoreAdapter.getTotalBufferSize()
            Log.i("ThreadedTriplestoreAdapter Buffer Size:", ThreadedTriplestoreAdapter.getTotalBufferSize())
            self.counter = 0

    def receive(self, parsedData, sensordescription, clock, quality):
        # Each received observation just bumps the running counter.
        self.counter += 1

    def stop(self):
        """Ask the reporting thread to exit after its next wakeup."""
        self._stop_requested = True
class PerformanceMeterSeconds(AbstractPerformanceMeter):
    """Meter reporting observation throughput once per second."""
    def __init__(self):
        AbstractPerformanceMeter.__init__(self, 1, "observations/s")
class PerformanceMeterMinutes(AbstractPerformanceMeter):
    """Meter reporting observation throughput once per minute."""
    def __init__(self):
        AbstractPerformanceMeter.__init__(self, 60, "observations/min")
|
swaprebalance.py | import time
import datetime
import unittest
from TestInput import TestInputSingleton
import logger
from couchbase_helper.cluster import Cluster
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.cluster_helper import ClusterOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import LoadWithMcsoda
from threading import Thread
from remote.remote_util import RemoteMachineShellConnection
from memcached.helper.data_helper import MemcachedClientHelper
from membase.api.exception import RebalanceFailedException
from basetestcase import BaseTestCase
from security.rbac_base import RbacBase
class SwapRebalanceBase(unittest.TestCase):
    @staticmethod
    def common_setup(self):
        """Shared setUp for swap-rebalance tests.

        Reads test input params, clears leftover rebalance state,
        initializes the cluster and creates the built-in rbac admin user.
        On any failure, shuts down the cluster helper and fails the test.
        """
        self.cluster_helper = Cluster()
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        # If all configured servers share one ip we're on a single-machine
        # 'cluster run'; normalize server ips to what the node reports.
        if len({server.ip for server in self.servers}) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.bucket_storage = self.input.param("bucket_storage", 'couchstore')
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from Previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("============== SwapRebalanceBase setup was started for test #{0} {1}=============="\
                          .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)
            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))
            self.log.info('picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            SwapRebalanceBase.enable_diag_eval_on_non_local_hosts(self, serverInfo)
            # Add built-in user
            testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
            RbacBase().create_user_source(testuser, 'builtin', self.servers[0])
            # Assign user to role
            role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
            RbacBase().add_user_role(role_list, RestConnection(self.servers[0]), 'builtin')
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info("============== SwapRebalanceBase setup was finished for test #{0} {1} =============="
                          .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception as e:
            self.cluster_helper.shutdown()
            self.fail(e)
@staticmethod
def common_tearDown(self):
self.cluster_helper.shutdown()
test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
if test_failed and TestInputSingleton.input.param("stop-on-failure", False)\
or self.input.param("skip_cleanup", False):
self.log.warning("CLEANUP WAS SKIPPED")
else:
SwapRebalanceBase.reset(self)
SwapRebalanceBase._log_finish(self)
# Remove rbac user in teardown
try:
role_del = ['cbadminbucket']
RbacBase().remove_user_role(role_del, RestConnection(
self.servers[0]))
except:
pass
    @staticmethod
    def reset(self):
        """Stop load, delete all buckets and return nodes to a clean state."""
        self.log.info("============== SwapRebalanceBase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
        # Any in-flight rebalance must be stopped before buckets can go away.
        for server in self.servers:
            rest = RestConnection(server)
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
            # Restore any custom data path the server was configured with.
            if server.data_path:
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.log.info("============== SwapRebalanceBase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
@staticmethod
def enable_diag_eval_on_non_local_hosts(self, master):
"""
Enable diag/eval to be run on non-local hosts.
:param master: Node information of the master node of the cluster
:return: Nothing
"""
remote = RemoteMachineShellConnection(master)
output, error = remote.enable_diag_eval_on_non_local_hosts()
if "ok" not in output:
self.log.error("Error in enabling diag/eval on non-local hosts on {}. {}".format(master.ip, output))
raise Exception("Error in enabling diag/eval on non-local hosts on {}".format(master.ip))
else:
self.log.info("Enabled diag/eval for non-local hosts from {}".format(master.ip))
@staticmethod
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
@staticmethod
def sleep(self, timeout=1, message=""):
self.log.info("sleep for {0} secs. {1} ...".format(timeout, message))
time.sleep(timeout)
    @staticmethod
    def _create_default_bucket(self, replica=1):
        """Create the 'default' bucket (sized to available ram) if missing."""
        name = "default"
        master = self.servers[0]
        rest = RestConnection(master)
        helper = RestHelper(RestConnection(master))
        if not helper.bucket_exists(name):
            # Size the bucket quota from this node's memory quota.
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            available_ram = info.memoryQuota * node_ram_ratio
            rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram), replicaNumber=replica,
                               storageBackend=self.bucket_storage)
            ready = BucketOperationHelper.wait_for_memcached(master, name)
            self.assertTrue(ready, msg="wait_for_memcached failed")
        self.assertTrue(helper.bucket_exists(name),
                        msg="unable to create {0} bucket".format(name))
@staticmethod
def _create_multiple_buckets(self, replica=1):
    """Create self.num_buckets buckets and wait until each one is served.

    :param replica: replica count applied to every bucket
    """
    master = self.servers[0]
    created = BucketOperationHelper.create_multiple_buckets(master, replica, howmany=self.num_buckets,
                                                           bucket_storage=self.bucket_storage)
    self.assertTrue(created, "unable to create multiple buckets")
    rest = RestConnection(master)
    buckets = rest.get_buckets()
    for bucket in buckets:
        # Creation is asynchronous; wait until memcached serves each bucket.
        ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
        self.assertTrue(ready, msg="wait_for_memcached failed")
# Used for items verification active vs. replica
@staticmethod
def items_verification(test, master):
    """Verify that active and replica item counts converge for every bucket.

    :param test: the running TestCase (supplies asserts)
    :param master: node whose REST API is queried
    """
    rest = RestConnection(master)
    # Verify items count across all node
    timeout = 600
    for bucket in rest.get_buckets():
        verified = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
        test.assertTrue(verified, "Lost items!!.. failing test in {0} secs".format(timeout))
@staticmethod
def start_load_phase(self, master):
    """Start one mcsoda loader thread per bucket (create-only workload).

    :return: list of {"mcsoda": client, "thread": thread} dicts, threads started
    """
    rest = RestConnection(master)
    loaders = []
    for bucket in rest.get_buckets():
        client = LoadWithMcsoda(master, self.keys_count, bucket=bucket.name,
                                rest_password=master.rest_password, prefix=str(bucket.name), port=8091)
        # Pure load phase: create the keys and exit.
        client.cfg["exit-after-creates"] = 1
        client.cfg["json"] = 0
        worker = Thread(target=client.load_data, name='mcloader_' + bucket.name)
        worker.daemon = True
        loaders.append({"mcsoda": client, "thread": worker})
    for entry in loaders:
        entry["thread"].start()
    return loaders
@staticmethod
def start_access_phase(self, master):
    """Start one mcsoda access-workload thread per bucket (mixed ops over
    half the key space).

    :return: list of {"mcsoda": client, "thread": thread} dicts, threads started
    """
    rest = RestConnection(master)
    loaders = []
    for bucket in rest.get_buckets():
        client = LoadWithMcsoda(master, self.keys_count // 2, bucket=bucket.name,
                                rest_password=master.rest_password, prefix=str(bucket.name), port=8091)
        # Mixed workload: mostly sets, some deletes/expirations.
        client.cfg["ratio-sets"] = 0.8
        client.cfg["ratio-hot"] = 0.2
        client.cfg["ratio-creates"] = 0.5
        client.cfg["ratio-deletes"] = self.ratio_deletes
        client.cfg["ratio-expirations"] = self.ratio_expiry
        client.cfg["json"] = 0
        worker = Thread(target=client.load_data, name='mcloader_' + bucket.name)
        worker.daemon = True
        loaders.append({"mcsoda": client, "thread": worker})
    for entry in loaders:
        entry["thread"].start()
    return loaders
@staticmethod
def stop_load(loaders, do_stop=True):
    """Optionally signal mcsoda loaders to stop, then join their threads.

    :param do_stop: when True, request stop and cap each join at 300 s;
        when False, just wait for the loaders to finish on their own.
    """
    if do_stop:
        for entry in loaders:
            entry["mcsoda"].load_stop()
    # join(None) waits forever, matching the no-stop case.
    join_timeout = 300 if do_stop else None
    for entry in loaders:
        entry["thread"].join(join_timeout)
@staticmethod
def create_buckets(self):
    """Create the default bucket or several buckets, per self.num_buckets."""
    if self.num_buckets == 1:
        creator = SwapRebalanceBase._create_default_bucket
    else:
        creator = SwapRebalanceBase._create_multiple_buckets
    creator(self, replica=self.replica)
@staticmethod
def verification_phase(test, master):
    """Stop the loaders, then verify item counts across the cluster."""
    # Stop loaders
    SwapRebalanceBase.stop_load(test.loaders)
    test.log.info("DONE DATA ACCESS PHASE")
    test.log.info("VERIFICATION PHASE")
    rest = RestConnection(master)
    servers_in_cluster = []
    nodes = rest.get_nodes()
    # Collect the configured servers that are still cluster members.
    # NOTE(review): servers_in_cluster is built but not used below in this
    # method — confirm whether it can be dropped.
    for server in test.servers:
        for node in nodes:
            if node.ip == server.ip and node.port == server.port:
                servers_in_cluster.append(server)
    # Let stats settle before comparing active vs. replica counts.
    time.sleep(60)
    SwapRebalanceBase.items_verification(test, master)
@staticmethod
def _common_test_body_swap_rebalance(self, do_stop_start=False):
    """Create buckets, load data, then swap-rebalance self.num_swap nodes.

    :param do_stop_start: when True, stop and restart the rebalance at
        20/40/60% progress to exercise rebalance restart.
    """
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    initial_servers = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".
                        format(status, content))
        # BUG FIX: compare values with ==, not identity ("is"); "is" on
        # ints only works by accident via CPython's small-int caching.
        if self.num_swap == len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # The orchestrator is being ejected; talk to a newly added node.
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    if self.do_access:
        self.log.info("DATA ACCESS PHASE")
        self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    if do_stop_start:
        # Rebalance is stopped at 20%, 40% and 60% completion
        retry = 0
        for expected_progress in (20, 40, 60):
            self.log.info("STOP/START SWAP REBALANCE PHASE WITH PROGRESS {0}%".
                          format(expected_progress))
            while True:
                progress = rest._rebalance_progress()
                if progress < 0:
                    self.log.error("rebalance progress code : {0}".format(progress))
                    break
                elif progress == 100:
                    self.log.warning("Rebalance has already reached 100%")
                    break
                elif progress >= expected_progress:
                    self.log.info("Rebalance will be stopped with {0}%".format(progress))
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                    SwapRebalanceBase.sleep(self, 20)
                    # Restart the same rebalance after the pause.
                    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                                   ejectedNodes=optNodesIds)
                    break
                elif retry > 100:
                    break
                else:
                    retry += 1
                    SwapRebalanceBase.sleep(self, 1)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))
    SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _common_test_body_failed_swap_rebalance(self):
    """Swap-rebalance, kill memcached mid-rebalance to force a failure,
    then retry the rebalance and verify the cluster recovers."""
    master = self.servers[0]
    rest = RestConnection(master)
    num_initial_servers = self.num_initial_servers
    creds = self.input.membase_settings
    initial_servers = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(initial_servers, len(initial_servers) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.num_swap)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.swap_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".
                        format(status, content))
        # When swapping all the nodes
        # BUG FIX: use == (value comparison), not "is" (identity).
        if self.num_swap == len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
    for node in optNodesIds:
        self.log.info("removing node {0} and rebalance afterwards".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.num_swap]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.swap_orchestrator:
        # The orchestrator is being ejected; talk to a newly added node.
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    self.log.info("SWAP REBALANCE PHASE")
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    SwapRebalanceBase.sleep(self, 10, "Rebalance should start")
    self.log.info("FAIL SWAP REBALANCE PHASE @ {0}".format(self.percentage_progress))
    reached = RestHelper(rest).rebalance_reached(self.percentage_progress)
    if reached and RestHelper(rest).is_cluster_rebalanced():
        # handle situation when rebalance failed at the beginning
        self.log.error('seems rebalance failed!')
        rest.print_UI_logs()
        self.fail("rebalance failed even before killing memcached")
    bucket = rest.get_buckets()[0].name
    pid = None
    if self.swap_orchestrator and not self.cluster_run:
        # get PID via remote connection if master is a new node
        shell = RemoteMachineShellConnection(master)
        pid = shell.get_memcache_pid()
        shell.disconnect()
    else:
        # mc.stats() sometimes fails while rebalance churns; retry a few times.
        times = 2
        if self.cluster_run:
            times = 20
        for i in range(times):
            try:
                _mc = MemcachedClientHelper.direct_client(master, bucket)
                pid = _mc.stats()["pid"]
                break
            except (EOFError, KeyError) as e:
                self.log.error("{0}.Retry in 2 sec".format(e))
                SwapRebalanceBase.sleep(self, 2)
        if pid is None:
            # sometimes pid is not returned by mc.stats()
            shell = RemoteMachineShellConnection(master)
            pid = shell.get_memcache_pid()
            shell.disconnect()
    if pid is None:
        self.fail("impossible to get a PID")
    # Kill memcached through ns_server's diag/eval on the node itself.
    command = "os:cmd(\"kill -9 {0} \")".format(pid)
    self.log.info(command)
    killed = rest.diag_eval(command)
    self.log.info("killed {0}:{1}?? {2} ".format(master.ip, master.port, killed))
    self.log.info("sleep for 10 sec after kill memcached")
    SwapRebalanceBase.sleep(self, 10)
    # we can't get stats for new node when rebalance falls
    if not self.swap_orchestrator:
        ClusterOperationHelper._wait_warmup_completed(self, [master], bucket, wait_time=600)
    i = 0
    # we expect that rebalance will be failed
    try:
        rest.monitorRebalance()
    except RebalanceFailedException:
        # retry rebalance if it failed
        self.log.warning("Rebalance failed but it's expected")
        SwapRebalanceBase.sleep(self, 30)
        self.assertFalse(RestHelper(rest).is_cluster_rebalanced(), msg="cluster need rebalance")
        knownNodes = rest.node_statuses()
        self.log.info("nodes are still in cluster: {0}".format([(node.ip, node.port) for node in knownNodes]))
        # Only eject nodes that are still cluster members.
        ejectedNodes = list(set(optNodesIds) & {node.id for node in knownNodes})
        rest.rebalance(otpNodes=[node.id for node in knownNodes], ejectedNodes=ejectedNodes)
        SwapRebalanceBase.sleep(self, 10, "Wait for rebalance to start")
        self.assertTrue(rest.monitorRebalance(),
                        msg="rebalance operation failed after adding node {0}".format(toBeEjectedNodes))
    else:
        self.log.info("rebalance completed successfully")
    SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _add_back_failed_node(self, do_node_cleanup=False):
    """Fail over nodes, rebalance them out, then add the same nodes back
    and rebalance again.

    :param do_node_cleanup: placeholder for node cleanup (currently a no-op)
    """
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(self.servers, len(self.servers) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    current_nodes = RebalanceHelper.getOtpNodeIds(master)
    self.log.info("current nodes : {0}".format(current_nodes))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    # List of servers that will not be failed over
    not_failed_over = []
    for server in self.servers:
        # On cluster_run all nodes share an IP, so distinguish by port.
        if self.cluster_run:
            if server.port not in [node.port for node in toBeEjectedNodes]:
                not_failed_over.append(server)
                self.log.info("Node {0}:{1} not failed over".format(server.ip, server.port))
        else:
            if server.ip not in [node.ip for node in toBeEjectedNodes]:
                not_failed_over.append(server)
                self.log.info("Node {0}:{1} not failed over".format(server.ip, server.port))
    if self.fail_orchestrator:
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".
                        format(status, content))
        # When swapping all the nodes
        # BUG FIX: use == (value comparison), not "is" (identity).
        if self.num_swap == len(current_nodes):
            optNodesIds.append(content)
        else:
            optNodesIds[0] = content
        # The orchestrator goes away: work through a surviving node.
        master = not_failed_over[-1]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()],
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(optNodesIds))
    # Add back the same failed over nodes
    # Cleanup the node, somehow
    # TODO: cluster_run?
    if do_node_cleanup:
        pass
    # Make rest connection with node part of cluster
    rest = RestConnection(master)
    # Given the optNode, find ip
    add_back_servers = []
    nodes = rest.get_nodes()
    for server in nodes:
        if isinstance(server.ip, str):
            add_back_servers.append(server)
    final_add_back_servers = []
    for server in self.servers:
        if self.cluster_run:
            if server.port not in [serv.port for serv in add_back_servers]:
                final_add_back_servers.append(server)
        else:
            if server.ip not in [serv.ip for serv in add_back_servers]:
                final_add_back_servers.append(server)
    for server in final_add_back_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], ejectedNodes=[])
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(add_back_servers))
    SwapRebalanceBase.verification_phase(self, master)
@staticmethod
def _failover_swap_rebalance(self):
    """Fail over `failover_factor` nodes, add the same number of fresh
    nodes, then rebalance the ejected set out under access load."""
    master = self.servers[0]
    rest = RestConnection(master)
    creds = self.input.membase_settings
    num_initial_servers = self.num_initial_servers
    intial_severs = self.servers[:num_initial_servers]
    self.log.info("CREATE BUCKET PHASE")
    SwapRebalanceBase.create_buckets(self)
    # Cluster all starting set of servers
    self.log.info("INITIAL REBALANCE PHASE")
    status, servers_rebalanced = RebalanceHelper.rebalance_in(intial_severs, len(intial_severs) - 1)
    self.assertTrue(status, msg="Rebalance was failed")
    self.log.info("DATA LOAD PHASE")
    self.loaders = SwapRebalanceBase.start_load_phase(self, master)
    # Wait till load phase is over
    SwapRebalanceBase.stop_load(self.loaders, do_stop=False)
    self.log.info("DONE LOAD PHASE")
    # Start the swap rebalance
    self.log.info("current nodes : {0}".format(RebalanceHelper.getOtpNodeIds(master)))
    toBeEjectedNodes = RebalanceHelper.pick_nodes(master, howmany=self.failover_factor)
    optNodesIds = [node.id for node in toBeEjectedNodes]
    if self.fail_orchestrator:
        # Make sure the orchestrator itself is among the failed-over nodes.
        status, content = ClusterOperationHelper.find_orchestrator(master)
        self.assertTrue(status, msg="Unable to find orchestrator: {0}:{1}".\
                        format(status, content))
        optNodesIds[0] = content
    self.log.info("FAILOVER PHASE")
    # Failover selected nodes
    for node in optNodesIds:
        self.log.info("failover node {0} and rebalance afterwards".format(node))
        rest.fail_over(node)
        self.assertTrue(rest.monitorRebalance(),
                        msg="failed after failover of {0}".format(node))
    new_swap_servers = self.servers[num_initial_servers:num_initial_servers + self.failover_factor]
    for server in new_swap_servers:
        otpNode = rest.add_node(creds.rest_username, creds.rest_password, server.ip, server.port)
        msg = "unable to add node {0} to the cluster"
        self.assertTrue(otpNode, msg.format(server.ip))
    if self.fail_orchestrator:
        # Orchestrator was failed over; talk to one of the new nodes.
        rest = RestConnection(new_swap_servers[0])
        master = new_swap_servers[0]
    self.log.info("DATA ACCESS PHASE")
    self.loaders = SwapRebalanceBase.start_access_phase(self, master)
    rest.rebalance(otpNodes=[node.id for node in rest.node_statuses()], \
                   ejectedNodes=optNodesIds)
    self.assertTrue(rest.monitorRebalance(),
                    msg="rebalance operation failed after adding node {0}".format(new_swap_servers))
    SwapRebalanceBase.verification_phase(self, master)
class SwapRebalanceBasicTests(unittest.TestCase):
    """Plain swap-rebalance scenario (no stop/start interruptions)."""
    def setUp(self):
        SwapRebalanceBase.common_setup(self)
    def tearDown(self):
        SwapRebalanceBase.common_tearDown(self)
    def do_test(self):
        # Run the swap rebalance straight through.
        SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=False)
class SwapRebalanceStartStopTests(unittest.TestCase):
    """Swap-rebalance that is stopped and restarted at 20/40/60% progress."""
    def setUp(self):
        SwapRebalanceBase.common_setup(self)
    def tearDown(self):
        SwapRebalanceBase.common_tearDown(self)
    def do_test(self):
        # Same scenario but the rebalance is interrupted and resumed.
        SwapRebalanceBase._common_test_body_swap_rebalance(self, do_stop_start=True)
class SwapRebalanceFailedTests(unittest.TestCase):
    """Failure-injection variants: kill memcached mid-rebalance, add back
    failed-over nodes, swap-rebalance after failover."""
    def setUp(self):
        SwapRebalanceBase.common_setup(self)
    def tearDown(self):
        SwapRebalanceBase.common_tearDown(self)
    def test_failed_swap_rebalance(self):
        # Progress threshold at which memcached is killed.
        self.percentage_progress = self.input.param("percentage_progress", 50)
        SwapRebalanceBase._common_test_body_failed_swap_rebalance(self)
    # Not cluster_run friendly, yet
    def test_add_back_failed_node(self):
        SwapRebalanceBase._add_back_failed_node(self, do_node_cleanup=False)
    def test_failover_swap_rebalance(self):
        SwapRebalanceBase._failover_swap_rebalance(self)
|
test3.py | # Multi-threaded crawler: automatically creates one folder per page and stores each page's images separately
import requests
import threading
import re
import time
import queue
import os
from bs4 import BeautifulSoup
string = 'https://www.quanjing.com/category/1286521/'
url_queue = queue.Queue()
pipei = re.compile('lowsrc="(.*?)" m=') #
def get_url(page):
    """Queue the listing-page URLs for pages 1..page onto url_queue."""
    for page_no in range(1, page + 1):
        url_queue.put('{0}{1}.html'.format(string, page_no))
    # print(url_queue.queue)
def spider(url_queue):
    """Worker: take page URLs from url_queue and download every gallery
    image on each page into a per-page folder named '第N页'.

    Fixes over the original:
    - iterative queue drain instead of one-recursion-per-URL (recursion
      could hit the interpreter limit for large page counts);
    - os.makedirs(..., exist_ok=True) so a pre-existing folder (or a race
      with another worker thread) no longer crashes the run;
    - os.path.join instead of a hard-coded Windows '\\' in the file path.
    """
    while True:
        try:
            # Non-blocking get: an empty queue means this worker is done.
            url = url_queue.get_nowait()
        except queue.Empty:
            break
        # Page number is encoded just before ".html"; strip a leading '/'.
        tail = url[-7:-5]
        folder_name = tail[1] if tail[0] == '/' else tail
        folder = '第{0}页'.format(folder_name)
        os.makedirs(folder, exist_ok=True)
        html = requests.get(url=url).text
        soup = BeautifulSoup(html, 'lxml')
        gallery = soup.find_all(attrs={"class": "gallery_list"})
        # The regex needs a str, not a list of Tag objects.
        links = re.findall(pipei, str(gallery))
        for index, link in enumerate(links, start=1):
            image = requests.get(url=link).content
            with open(os.path.join(folder, '{0}.jpg'.format(index)), 'wb') as f:
                f.write(image)
            print('第{0}页第{1}张存储完成'.format(folder_name, index))
def main():
    """Start three spider worker threads and wait for all to finish."""
    workers = [threading.Thread(target=spider, args=(url_queue, ))
               for _ in range(3)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == '__main__':
    # Ask how many listing pages to crawl, enqueue their URLs, then time the run.
    page = int(input("请输入需要爬取的页数:"))
    get_url(page)
    start_time = time.time()
    main()
    print("test3用时:%f" % (time.time() - start_time))
gopro_server.py | import cv2
import json
import numpy as np
import os
import pickle
import RPi.GPIO as GPIO
import requests
import socket
import struct
import time
from getmac import get_mac_address
from threading import Thread
DEBUG_LOGS = False
RAW_PICTURES_FOLDER = './pictures/raw'
PROCESSED_PICTURES_FOLDER = './pictures/processed'
CHANNEL = 10 # GPIO button channel
EVENT_DELAY = 5 # In seconds
class MyGoPro:
    """Controller for a WiFi-connected GoPro: keeps it awake, mirrors new
    pictures locally and post-processes them with fisheye undistortion."""

    ### Static methods ###
    def parse_media(media):
        """Split a GoPro media listing into (pictures, videos) name lists.

        :param media: list of {'n': filename, ...} dicts from gpMediaList
        """
        media = [m['n'] for m in media]
        pictures = [m for m in media if m.endswith('.JPG')]
        videos = [m for m in media if m.endswith('.MP4')]
        return pictures, videos

    ### Methods ###
    def __init__(self):
        # 10.5.5.9 is the fixed address of a GoPro in AP mode.
        self.ip_addr = '10.5.5.9'
        self.get_mac_address()
        self.keep_alive()
        self.load_calibration_data()
        print('Init...')
        # TODO(pmustiere): Wait for a first thing to work before doing the other requests
        requests.get('http://10.5.5.9/gp/gpControl/setting/53/1') # Default boot mode to picture, just in case
        self.refresh_pictures()
        print('Ready\n')

    def get_mac_address(self):
        """Resolve and normalize the camera's MAC address (needed for WoL).

        Note: the method shadows the imported getmac helper of the same
        name; inside the body the module-level function is called.
        """
        # Retrieve MAC address
        self.mac_address = get_mac_address(ip='10.5.5.9')
        if not self.mac_address:
            print('ERROR: Failed to get MAC address')
            exit(1)
        self.mac_address = str(self.mac_address)
        if len(self.mac_address) == 17: # Remove separators, if needed
            self.mac_address = self.mac_address.replace(self.mac_address[2], '')

    def keep_alive(self):
        """Start a background thread that wakes the GoPro (Wake-on-LAN)
        and then pings it every 2.5 s, re-waking it when it stops responding."""
        def keep_alive_function(ip_addr):
            while True:
                # Send power on command
                if DEBUG_LOGS:
                    print('[keep_alive_thread] Waking up GoPro...')
                sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                # WoL magic packet: 6x 0xFF followed by 16 repetitions of the MAC.
                data = bytes('FFFFFFFFFFFF' + self.mac_address * 16, 'utf-8')
                message = b''
                for i in range(0, len(data), 2):
                    message += struct.pack(b'B', int(data[i: i + 2], 16))
                sock.sendto(message, (self.ip_addr, 9))
                if DEBUG_LOGS:
                    print('[keep_alive_thread] Awake\n')
                while True:
                    try:
                        if DEBUG_LOGS:
                            print('[keep_alive_thread] Keep alive')
                        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                        sock.sendto('_GPHD_:0:0:2:0.000000\n'.encode(), (ip_addr, 8554))
                        time.sleep(2.5)
                        response = requests.get('http://10.5.5.9:8080/gp/gpControl/status', timeout=1)
                        response.raise_for_status()
                    except:
                        print('[keep_alive_thread] GoPro not responding, going to wake it up')
                        # BUG FIX: break back to the outer loop so the
                        # wake-on-LAN packet is actually re-sent; without
                        # this the inner loop spun forever and the camera
                        # was never woken up again.
                        break
        self.keep_alive_thread = Thread(target=keep_alive_function, args=(self.ip_addr,))
        self.keep_alive_thread.start()

    def load_calibration_data(self):
        """Load the (DIM, K, D) fisheye calibration tuple from disk."""
        with open('calibration/results/calibration_data.pkl', 'rb') as f:
            self.calibration_data = pickle.load(f)
        print('Loaded calibration data')

    def list_media(self):
        """Query the camera's media list; cache folder id and media names."""
        response = requests.get('http://10.5.5.9:8080/gp/gpMediaList')
        response.raise_for_status()
        response = response.json()
        self.gopro_id = response['media'][0]['d']
        self.gopro_pictures, self.gopro_videos = MyGoPro.parse_media(response['media'][0]['fs'])

    def list_downloaded_pictures(self):
        """Refresh the sorted list of locally downloaded raw pictures."""
        self.downloaded_pictures = sorted(os.listdir(RAW_PICTURES_FOLDER))

    def list_processed_pictures(self):
        """Refresh the sorted list of locally processed pictures."""
        self.processed_pictures = sorted(os.listdir(PROCESSED_PICTURES_FOLDER))

    def get_picture(self, picture):
        """Stream-download one picture from the camera into the raw folder."""
        response = requests.get('http://10.5.5.9:8080/videos/DCIM/{}/{}'.format(self.gopro_id, picture), stream=True)
        response.raise_for_status()
        with open('{}/{}'.format(RAW_PICTURES_FOLDER, picture), 'wb') as f:
            for chunk in response:
                f.write(chunk)
        print('Downloaded picture \'{}\''.format(picture))

    def process_picture(self, picture):
        """Undistort one raw picture (fisheye model) into the processed folder."""
        DIM, K, D = self.calibration_data
        img = cv2.imread('{}/{}'.format(RAW_PICTURES_FOLDER, picture))
        h, w = img.shape[:2]
        map1, map2 = cv2.fisheye.initUndistortRectifyMap(K, D, np.eye(3), K, DIM, cv2.CV_16SC2)
        undistorted_img = cv2.remap(img, map1, map2, interpolation=cv2.INTER_LINEAR, borderMode=cv2.BORDER_CONSTANT)
        cv2.imwrite('{}/{}'.format(PROCESSED_PICTURES_FOLDER, picture), undistorted_img)
        print('Processed picture \'{}\''.format(picture))

    def refresh_pictures(self, update=True):
        """Start a background thread that continuously downloads new camera
        pictures and processes any not-yet-undistorted ones.

        :param update: kept for interface compatibility (unused)
        """
        def refresh_pictures_function():
            while True:
                try:
                    time.sleep(1)
                    if DEBUG_LOGS:
                        print('[refresh_pictures_thread] Refreshing pictures...')
                    self.list_media()
                    self.list_downloaded_pictures()
                    for picture in [p for p in self.gopro_pictures if p not in self.downloaded_pictures]:
                        self.get_picture(picture)
                    self.list_downloaded_pictures()
                    self.list_processed_pictures()
                    for picture in [p for p in self.downloaded_pictures if p not in self.processed_pictures]:
                        self.process_picture(picture)
                except Exception as e:
                    # Keep the thread alive across transient camera/network errors.
                    print('[refresh_pictures_thread] got an error: {}'.format(e))
        self.refresh_pictures_thread = Thread(target=refresh_pictures_function)
        self.refresh_pictures_thread.start()

    def take_picture(self):
        """Trigger the camera shutter; the refresh thread downloads the result."""
        print('Taking a picture...')
        response = requests.get('http://10.5.5.9/gp/gpControl/command/shutter?p=1')
        response.raise_for_status()
        time.sleep(2)
        print('Done\n')
class GPIOButton():
    """Debounced GPIO push-button: fires event_hook on release, at most
    once per EVENT_DELAY seconds."""

    def __init__(self, event_hook):
        self.pressed = False        # True between press and release edges
        self.last_press = None      # timestamp of the last accepted event
        self.event_hook = event_hook
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(CHANNEL, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
        GPIO.add_event_detect(CHANNEL, GPIO.BOTH, callback=self.gpio_event)

    def gpio_event(self, channel):
        # The current pin level tells us whether this edge was press or release.
        handler = self.down if GPIO.input(CHANNEL) else self.up
        handler()

    def down(self):
        self.pressed = True

    def up(self):
        # Only a release that follows a press counts as a click.
        if self.pressed:
            self.maybe_event()
        self.pressed = False

    def maybe_event(self):
        now = time.time()
        if not self.last_press or now - self.last_press > EVENT_DELAY:
            self.event_hook()
            self.last_press = now
if __name__ == '__main__':
    # Wire the physical button to the camera shutter, then idle forever;
    # all real work happens in the GoPro's background threads.
    gopro = MyGoPro()
    button = GPIOButton(gopro.take_picture)
    while True:
        time.sleep(60)
|
05-join.py | import time
import threading
def myfunc(name):
    """Worker used by the join() demo: print start/end markers around a
    10-second sleep so the blocking effect of join() is visible."""
    print(f"myfunc started with {name}")
    time.sleep(10)
    print("myfunc ended")
if __name__ == '__main__':
    print('main started')
    #myfunc('realpython')
    # join() blocks the main thread until the worker finishes,
    # so 'main ended' is always printed last.
    t=threading.Thread(target=myfunc, args=['realpython'])
    t.start()
    t.join()
    print('main ended')
utilities.py | #!/usr/bin/python
# -- Content-Encoding: UTF-8 --
"""
Tests utility classes
:license: Apache License 2.0
"""
# JSON-RPC library
from jsonrpclib.SimpleJSONRPCServer import SimpleJSONRPCServer
# Standard library
import threading
# ------------------------------------------------------------------------------
# Test methods
def subtract(minuend, subtrahend):
    """
    Using the keywords from the JSON-RPC v2 doc
    """
    difference = minuend - subtrahend
    return difference
def add(x, y):
    """Return the sum (or concatenation) of x and y."""
    total = x + y
    return total
def update(*args):
    """Echo back all positional arguments as a tuple."""
    return tuple(args)
def summation(*args):
    """Return the sum of all positional arguments (0 when called with none)."""
    total = 0
    for value in args:
        total += value
    return total
def notify_hello(*args):
    """Echo back positional arguments (used as a notification target)."""
    return tuple(args)
def get_data():
    """Return the fixed sample payload used by the JSON-RPC tests."""
    payload = ['hello', 5]
    return payload
def ping():
    """Liveness probe for the test server; always returns True."""
    alive = True
    return alive
# ------------------------------------------------------------------------------
# Server utility class
class UtilityServer(object):
    """
    Utility start/stop server
    """
    def __init__(self):
        """
        Sets up members
        """
        self._server = None   # SimpleJSONRPCServer instance while running
        self._thread = None   # thread executing serve_forever()
    def start(self, addr, port):
        """
        Starts the server

        :param addr: A binding address
        :param port: A listening port (0 lets the OS pick a free one;
            see get_port() for the effective value)
        :return: This object (for in-line calls)
        """
        # Create the server
        self._server = server = SimpleJSONRPCServer((addr, port),
                                                    logRequests=False)
        # Register test methods (several under aliased JSON-RPC names)
        server.register_function(summation, 'sum')
        server.register_function(summation, 'notify_sum')
        server.register_function(notify_hello)
        server.register_function(subtract)
        server.register_function(update)
        server.register_function(get_data)
        server.register_function(add)
        server.register_function(ping)
        server.register_function(summation, 'namespace.sum')
        # Serve in a thread; daemon so it never blocks interpreter exit
        self._thread = threading.Thread(target=server.serve_forever)
        self._thread.daemon = True
        self._thread.start()
        # Allow an in-line instantiation
        return self
    def get_port(self):
        """
        Retrieves the port this server is listening to
        """
        return self._server.socket.getsockname()[1]
    def stop(self):
        """
        Stops the server and waits for its thread to finish
        """
        self._server.shutdown()
        self._server.server_close()
        self._thread.join()
        self._server = None
        self._thread = None
|
application.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import paho.mqtt.client as mqtt
from gpiozero import LED
from time import sleep
import json
import threading
import os
import sys
led = LED(18)
thing_name = "raspberry-pi"
shadow_topic_prefix = "$aws/things/" + thing_name + "/shadow/name/led"
INTERVAL = 60
def on_connect(client, userdata, flags, rc):
    """On (re)connect: report the result code and subscribe to the shadow
    topics. Subscribing here restores subscriptions after reconnects."""
    print("Connected with result code "+str(rc))
    for suffix in ("/get/accepted", "/update/delta"):
        client.subscribe(shadow_topic_prefix + suffix)
def on_message(client, userdata, msg):
    """Handle shadow documents and deltas from AWS IoT.

    /get/accepted carries the full shadow: apply the reported state (if
    any), then any delta. /update/delta carries only the desired-vs-
    reported difference. After applying a state, a thread re-reports the
    actual LED value so the shadow converges.
    """
    print(msg.topic+" "+str(msg.payload))
    payload = json.loads(msg.payload)
    if msg.topic == shadow_topic_prefix + "/get/accepted":
        if payload.get("state", {}).get("reported"):
            set_led(payload["state"]["reported"].get("led"))
            print("run report thread to fit the state to reported")
            report = threading.Thread(target=report_shadow)
            report.start()
        else:
            print("nothing reported yet")
        if payload.get("state", {}).get("delta"):
            print("There is delta! (/get/accepted)")
            set_led(payload["state"]["delta"].get("led"))
            print("run report thread to fit the state to delta")
            report = threading.Thread(target=report_shadow)
            report.start()
        else:
            print("no delta")
    elif msg.topic == shadow_topic_prefix + "/update/delta":
        print("There is delta! (/update/delta)")
        set_led(payload["state"].get("led"))
        print("run report thread to fit the state to delta")
        report = threading.Thread(target=report_shadow)
        report.start()
    else:
        print("unknown topic")
def set_led(led_state):
    """Drive the LED: on when led_state == "1", otherwise off; restart the
    whole process if the hardware state does not match afterwards.

    :param led_state: shadow value for the LED, expected "0" or "1"
    """
    if led_state == "1":
        led.on()
    else:
        led.off()
    if str(led.value) != led_state:
        # NOTE(review): a None/absent led_state always mismatches "0"/"1"
        # here and restarts the process — confirm this is intended.
        print("Something wrong. Restart the process.")
        # Replace the current process with a fresh interpreter run.
        os.execl(sys.executable, 'python3', __file__)
    return
def report_shadow():
    """Publish the LED's actual value to the shadow's 'reported' section."""
    payload = {"state": {"reported": {"led": str(led.value)}}}
    print("report! message is " + str(payload))
    client.publish(shadow_topic_prefix + "/update", json.dumps(payload))
def get_shadow():
    """Ask AWS IoT for the current shadow document (empty /get publish)."""
    topic = shadow_topic_prefix + "/get"
    client.publish(topic, "")
def subscribing():
    """Thread body: attach the message handler and run the MQTT network
    loop forever (blocks, hence its own thread)."""
    client.on_message = on_message
    client.loop_forever()
if __name__ == '__main__':
    # Broker is SORACOM Beam, which bridges this client to AWS IoT.
    client = mqtt.Client(protocol=mqtt.MQTTv311)
    client.on_connect = on_connect
    client.connect("beam.soracom.io", 1883, 60)
    # MQTT network loop runs in its own thread; main thread polls the shadow.
    sub = threading.Thread(target=subscribing)
    sub.start()
    sleep(0.5)
    while True:
        print("Get the shadow")
        get_shadow()
        sleep(INTERVAL)
|
Hiwin_RT605_ArmCommand_Socket_20190627170327.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
#Receive commands from the strategy side and forward them over a socket to the control-side PC
import socket
##Multi-threading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' #設定傳輸資料初始值
Arm_feedback = 1 #假設手臂忙碌
NAME = 'socket_server'
##------------class pos-------
class point():
    """Simple 6-DOF pose container: position (x, y, z) plus pitch/roll/yaw."""
    def __init__(self, x, y, z, pitch, roll, yaw):
        # Store the pose verbatim; no unit conversion happens here.
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw

# Current target pose of the arm (initial home pose).
pos = point(0, 36.8, 11.35, -90, 0, 0)
##------------class socket_cmd---------
class socket_data():
    """Buffer for the most recent arm command received from the strategy side."""
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        # Store every field verbatim; Socket_command() consumes them later.
        (self.grip, self.setvel, self.ra, self.delay,
         self.setboth, self.action, self.Speedmode) = (
            grip, setvel, ra, delay, setboth, action, Speedmode)

# Single shared command buffer.
socket_cmd = socket_data(0, 0, 0, 0, 0, 0, 0)
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper.

    Usage:
        for case in switch(value):
            if case(1): ...; break
            if case(2): ...; break
            if case():  ...  # default: always matches

    After a case matches, `fall` makes every later case() call match too,
    emulating C fall-through when the user omits `break`.
    """
    def __init__(self, value):
        self.value = value
        self.fall = False
    def __iter__(self):
        """Return the match method once, then stop."""
        # BUG FIX: `raise StopIteration` inside a generator is a
        # RuntimeError under PEP 479 (Python 3.7+); returning after the
        # single yield ends the iteration correctly.
        yield self.match
    def match(self, *args):
        """Indicate whether or not to enter a case suite."""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Arm state flags reported back to the strategy-side client."""
    def __init__(self, ArmState, SentFlag):
        # ArmState: busy/idle indicator; SentFlag: command-sent indicator.
        self.ArmState, self.SentFlag = ArmState, SentFlag

# Shared feedback object published by socket_talker().
state_feedback = StateFeedback(0, 0)
def point_data(x, y, z, pitch, roll, yaw):
    """Store a pose received from the strategy side into the shared `pos`."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth):
    """Receive arm-mode data from the strategy side, stage it into the
    shared socket_cmd buffer, and forward it via Socket_command()."""
    global arm_mode_flag
    socket_cmd.action = action
    socket_cmd.grip = grip
    socket_cmd.ra = ra
    socket_cmd.setvel = setvel
    socket_cmd.setboth = setboth
    # Mark the buffer dirty so Socket_command() actually builds/sends it.
    arm_mode_flag = True
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):
    """Receive the arm speed-mode value from the strategy side.

    NOTE(review): declares `global speed_mode_flag` but never assigns it
    here — confirm whether the flag should be raised like arm_mode_flag.
    """
    global speed_mode_flag
    socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ##接收策略端傳送夾爪動作資料
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker():
    """Create the ROS server node and publish arm state on 'chatter' at 10 Hz.

    Publishes [ArmState, SentFlag] from the shared state_feedback object so
    the strategy side can poll whether the arm is busy / a command was sent.
    """
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        # rospy.loginfo(state)
        pub.publish(state)
        rate.sleep()
    # a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
    # s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
    # b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
    #c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
    print ("Ready to connect")
    #rospy.spin() ## spin one
##----------socket 封包傳輸--------------##
##---------------socket 傳輸手臂命令-----------------
def Socket_command():
    """Encode the staged command in `socket_cmd` and send it to the arm.

    Only acts when Arm_Mode() has set `arm_mode_flag`; the flag is consumed
    here so each staged command is transmitted exactly once.
    """
    global arm_mode_flag,speed_mode_flag,point_data_flag
    if arm_mode_flag == True:
        arm_mode_flag = False
        # Outer switch selects the action type; inner switches select the
        # control mode. Each case ends with `break` (no fall-through used).
        for case in switch(socket_cmd.action):
            #-------PtP Mode--------
            if case(Taskcmd.Action_Type.PtoP):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                break
            #-------Line Mode--------
            if case(Taskcmd.Action_Type.Line):
                for case in switch(socket_cmd.setboth):
                    if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                    if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                        data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                        break
                break
            #------- set arm speed --------
            if case(Taskcmd.Action_Type.SetVel):
                data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
                break
            #------- set arm delay time --------
            if case(Taskcmd.Action_Type.Delay):
                data = TCP.SetDelay(socket_cmd.grip,0)
                break
            #------- set arm rapid/safe speed mode --------
            if case(Taskcmd.Action_Type.Mode):
                data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
                break
        socket_cmd.action= 5 ## reset to the initial/idle mode state
        Socket.send(data.encode('utf-8'))# send over the socket (string encoded to bytes)
        # Socket_sent_flag = True
        # socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller over TCP and poll its status feedback.

    Continuously parses status bytes from the controller into the shared
    `state_feedback` object until the arm reports the shutdown state.
    """
    global Socket,Arm_feedback,data,Socket_sent_flag
    try:
        Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    print('Connection has been successful')
    print(Socket.recv(1024))
    while 1:
        feedback_str = Socket.recv(1024)
        # Arm status is reported in feedback byte 2 (ASCII digit).
        if str(feedback_str[2]) == '48':# '0': arm is Ready for the next motion command
            state_feedback.ArmState = 0
            # Arm_feedback = 0
            # socket_client_arm_state(Arm_feedback)
            #print("isbusy false")
        if str(feedback_str[2]) == '49':# '1': arm is busy, cannot run the next motion command
            state_feedback.ArmState = 1
            # Arm_feedback = 1
            # socket_client_arm_state(Arm_feedback)
            #print("isbusy true")
        if str(feedback_str[2]) == '54':# '6': strategy finished
            state_feedback.ArmState = 6
            # Arm_feedback = 6
            # socket_client_arm_state(Arm_feedback)
            print("shutdown")
        # Sent-flag acknowledgement in feedback byte 4.
        if str(feedback_str[4]) == '48':# returned 0: false
            state_feedback.SentFlag = 0
            # Socket_sent_flag = False
            # socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49':# returned 1: true
            state_feedback.SentFlag = 1
            # Socket_sent_flag = True
            # socket_client_sent_flag(Socket_sent_flag)
        ##--------------- arm command transmission end -----------------
        # NOTE(review): Arm_feedback is only assigned in commented-out code
        # above; presumably set elsewhere in this module — confirm.
        if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
            break
    rospy.on_shutdown(myhook)
    Socket.close()
## 多執行緒
def thread_test():
    """Thread entry point: run the blocking socket client loop."""
    socket_client()
## 多執行序 end
def myhook():
    """rospy shutdown hook: log that the node is stopping."""
    print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5## start in the initial/idle mode state
    t = threading.Thread(target=thread_test)
    t.start() # start the socket-client thread
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
t.join() |
usage.py | import uuid
import time
import hashlib
import os
import getpass
import json
import logging
import socket
import sys
import platform
import multiprocessing as mp
from parsl.dataflow.states import FINAL_FAILURE_STATES
from parsl.version import VERSION as PARSL_VERSION
logger = logging.getLogger(__name__)
def async_process(fn):
    """Decorator that launches *fn* in a separate multiprocessing.Process.

    The wrapper starts the process immediately and returns the started
    Process object so callers can join/terminate it.
    """
    from functools import wraps

    @wraps(fn)  # preserve the wrapped function's name/docstring
    def run(*args, **kwargs):
        proc = mp.Process(target=fn, args=args, kwargs=kwargs)
        proc.start()
        return proc
    return run
@async_process
def udp_messenger(domain_name, UDP_IP, UDP_PORT, sock_timeout, message):
    """Send a UDP message to the usage tracker asynchronously.

    Runs as a separate process (via @async_process) to overcome the
    limitations of signalling/terminating a thread that is blocked on a
    system call.

    Args:
        - domain_name (str) : Domain name string; overrides UDP_IP when resolvable
        - UDP_IP (str) : IP address YYY.YYY.YYY.YYY
        - UDP_PORT (int) : UDP port to send out on
        - sock_timeout (int) : Socket timeout
        - message (str) : payload to transmit (UTF-8 encoded before sending)
    """
    try:
        if message is None:
            raise ValueError("message was none")
        encoded_message = bytes(message, "utf-8")
        if encoded_message is None:
            raise ValueError("utf-8 encoding of message failed")
        if domain_name:
            try:
                UDP_IP = socket.gethostbyname(domain_name)
            except Exception:
                # Domain lookup failed; fall back to the caller-supplied UDP_IP.
                pass
        if UDP_IP is None:
            raise Exception("UDP_IP is None")
        if UDP_PORT is None:
            raise Exception("UDP_PORT is None")
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # UDP
        sock.settimeout(sock_timeout)
        try:
            # Reuse the bytes validated above instead of re-encoding.
            sock.sendto(encoded_message, (UDP_IP, UDP_PORT))
        finally:
            # Close the socket even when sendto raises.
            sock.close()
    except socket.timeout:
        logger.debug("Failed to send usage tracking data: socket timeout")
    except OSError as e:
        logger.debug("Failed to send usage tracking data: OSError: {}".format(e))
    except Exception as e:
        logger.debug("Failed to send usage tracking data: Exception: {}".format(e))
class UsageTracker (object):
    """Anonymized Usage Tracking for Parsl.

    Client for this is here : https://github.com/Parsl/parsl_tracking
    This issue captures the discussion that went into functionality
    implemented here : https://github.com/Parsl/parsl/issues/34
    """
    def __init__(self, dfk, ip='52.3.111.203', port=50077,
                 domain_name='tracking.parsl-project.org'):
        """Initialize usage tracking unless the user has opted-out.

        We will try to resolve the hostname specified in kwarg:domain_name
        and if that fails attempt to use the kwarg:ip. Determining the
        IP and sending message is done in a separate process to avoid
        slowing down DFK initialization.

        Tracks usage stats by inspecting the internal state of the dfk.

        Args:
             - dfk (DFK object) : Data Flow Kernel object

        KWargs:
             - ip (string) : IP address
             - port (int) : Port number, Default:50077
             - domain_name (string) : Domain name, will override IP
                  Default: tracking.parsl-project.org
        """
        self.domain_name = domain_name
        self.ip = ip
        # The sock timeout will only apply to UDP send and not domain resolution
        self.sock_timeout = 5
        self.UDP_PORT = port
        # Resolved lazily by udp_messenger from domain_name (or falls back to ip).
        self.UDP_IP = None
        # Processes spawned by send_UDP_message; terminated in close().
        self.procs = []
        self.dfk = dfk
        self.config = self.dfk.config
        # Random per-run identifier; no user-identifying data is sent raw.
        self.uuid = str(uuid.uuid4())
        self.parsl_version = PARSL_VERSION
        self.python_version = "{}.{}.{}".format(sys.version_info.major,
                                                sys.version_info.minor,
                                                sys.version_info.micro)
        self.test_mode, self.tracking_enabled = self.check_tracking_enabled()
        logger.debug("Tracking status: {}".format(self.tracking_enabled))
        logger.debug("Testing mode   : {}".format(self.test_mode))
        self.initialized = False  # Once first message is sent this will be True
    def check_tracking_enabled(self):
        """Determine the test flag and whether tracking is enabled.

        By default tracking is enabled. If the environment variable
        PARSL_TESTING is set to "true" (case insensitive), the test flag
        is set.

        Tracking is disabled if:
            1. `self.config.usage_tracking` is False
            2. Environment variable PARSL_TRACKING is set to "false"
               (case insensitive)

        Returns:
            (test (bool), track (bool))
        """
        track = True   # By default we track usage
        test = False   # By default we are not in testing mode
        testvar = str(os.environ.get("PARSL_TESTING", 'None')).lower()
        if testvar == 'true':
            test = True
        if not self.config.usage_tracking:
            track = False
        envvar = str(os.environ.get("PARSL_TRACKING", True)).lower()
        if envvar == "false":
            track = False
        return test, track
    def construct_start_message(self):
        """Collect preliminary run info at the start of the DFK.

        Usernames and hostnames are one-way hashed (sha256, truncated)
        before transmission.

        Returns :
              - Message dict dumped as json string, ready for UDP
        """
        uname = getpass.getuser().encode('latin1')
        hashed_username = hashlib.sha256(uname).hexdigest()[0:10]
        hname = socket.gethostname().encode('latin1')
        hashed_hostname = hashlib.sha256(hname).hexdigest()[0:10]
        message = {'uuid': self.uuid,
                   'uname': hashed_username,
                   'hname': hashed_hostname,
                   'test': self.test_mode,
                   'parsl_v': self.parsl_version,
                   'python_v': self.python_version,
                   'os': platform.system(),
                   'os_v': platform.release(),
                   'start': time.time()}
        return json.dumps(message)
    def construct_end_message(self):
        """Collect the final run information at the time of DFK cleanup.

        Returns:
             - Message dict dumped as json string, ready for UDP
        """
        app_count = self.dfk.task_count
        # Only executors managed by the DFK count as "sites".
        site_count = len([x for x in self.dfk.config.executors if x.managed])
        app_fails = len([t for t in self.dfk.tasks if
                         self.dfk.tasks[t]['status'] in FINAL_FAILURE_STATES])
        message = {'uuid': self.uuid,
                   'end': time.time(),
                   't_apps': app_count,
                   'sites': site_count,
                   'c_time': None,
                   'failed': app_fails,
                   'test': self.test_mode,
                   }
        return json.dumps(message)
    def send_UDP_message(self, message):
        """Send the given message via the udp_messenger subprocess.

        Returns 0 when a send was attempted, -1 when tracking is disabled.
        """
        x = 0
        if self.tracking_enabled:
            try:
                proc = udp_messenger(self.domain_name, self.UDP_IP, self.UDP_PORT, self.sock_timeout, message)
                self.procs.append(proc)
            except Exception as e:
                logger.debug("Usage tracking failed: {}".format(e))
        else:
            x = -1
        return x
    def send_message(self):
        """Send a message over UDP.

        The first call sends the start message; every subsequent call sends
        the end message. If tracking is disabled the message is dropped by
        send_UDP_message.

        Returns:
            time_taken (float): seconds spent constructing and dispatching
        """
        start = time.time()
        message = None
        if not self.initialized:
            message = self.construct_start_message()
            self.initialized = True
        else:
            message = self.construct_end_message()
        self.send_UDP_message(message)
        end = time.time()
        return end - start
    def __del__(self):
        # Best-effort cleanup of messenger subprocesses on garbage collection.
        return self.close()
    def close(self):
        """We terminate (SIGTERM) the processes added to the self.procs list """
        for proc in self.procs:
            proc.terminate()
if __name__ == '__main__':
    # Manual smoke test: build a minimal DFK and clean it up, which
    # exercises the usage-tracking start/end message path.
    from parsl import *
    set_stream_logger()
    workers = ThreadPoolExecutor(max_workers=4)
    dfk = DataFlowKernel(executors=[workers])
    dfk.cleanup()
|
train.py | import os
import time
from collections import deque
from multiprocessing import Process, Value, Array, Queue
from threading import Thread
import subprocess
import settings
from sources import start_carla, restart_carla
from sources import STOP, get_hparams
from sources import run_agent, AGENT_STATE
from sources import run_trainer, check_weights_size, TRAINER_STATE, CarlaEnv
from sources import ConsoleStats, Commands
from sources import get_carla_exec_command, kill_carla_processes, CarlaEnvSettings, CARLA_SETTINGS_STATE
if __name__ == '__main__':
    print('Starting...')
    # overall start time (adjusted below when resuming a previous run)
    start_time = time.time()
    # Create required folders
    os.makedirs('models', exist_ok=True)
    os.makedirs('tmp', exist_ok=True)
    os.makedirs('checkpoint', exist_ok=True)
    # Kill Carla processes if there are any and start simulator
    start_carla()
    # Load hparams if they are being saved by trainer
    hparams = get_hparams()
    if hparams:
        # If everything is ok, update start time by previous running time
        start_time -= hparams['duration']
    # Spawn limited trainer process and get weights' size
    print('Calculating weights size...')
    weights_size = Value('L', 0)
    p = Process(target=check_weights_size, args=(hparams['model_path'] if hparams else False, weights_size), daemon=True)
    p.start()
    # Busy-wait until the child writes a non-zero size, then reap it
    while weights_size.value == 0:
        time.sleep(0.01)
    p.join()
    # A bunch of variables and shared values used to set all parts of ARTDQN and communicate between them
    duration = Value('d')
    episode = Value('L', hparams['episode'] if hparams else 0)
    epsilon = Array('d', hparams['epsilon'] if hparams else [settings.START_EPSILON, settings.EPSILON_DECAY, settings.MIN_EPSILON])
    discount = Value('d', hparams['discount'] if hparams else settings.DISCOUNT)
    update_target_every = Value('L', hparams['update_target_every'] if hparams else settings.UPDATE_TARGET_EVERY)
    last_target_update = hparams['last_target_update'] if hparams else 0
    min_reward = Value('f', hparams['min_reward'] if hparams else settings.MIN_REWARD)
    # Per-agent preview arrays: first float is the "preview enabled" flag;
    # NOTE(review): meaning of the remaining five floats is not visible here.
    agent_show_preview = []
    for agent in range(settings.AGENTS):
        if hparams:
            agent_show_preview.append(Array('f', hparams['agent_show_preview'][agent]))
        else:
            agent_show_preview.append(Array('f', [(agent + 1) in settings.AGENT_SHOW_PREVIEW, 0, 0, 0, 0, 0]))
    save_checkpoint_every = Value('L', hparams['save_checkpoint_every'] if hparams else settings.SAVE_CHECKPOINT_EVERY)
    seconds_per_episode = Value('L', hparams['seconds_per_episode'] if hparams else settings.SECONDS_PER_EPISODE)
    # Raw byte buffer sized from the limited-trainer measurement above
    weights = Array('c', weights_size.value)
    weights_iteration = Value('L', hparams['weights_iteration'] if hparams else 0)
    transitions = Queue()
    tensorboard_stats = Queue()
    trainer_stats = Array('f', [0, 0])
    carla_check = None
    # -10**6 acts as a "no data yet" sentinel in the stats arrays
    episode_stats = Array('d', [-10**6, -10**6, -10**6, 0, 0, 0, 0, -10**6, -10**6, -10**6] + [-10**6 for _ in range((CarlaEnv.action_space_size + 1) * 3)])
    stop = Value('B', 0)
    agent_stats = []
    for _ in range(settings.AGENTS):
        agent_stats.append(Array('f', [0, 0, 0]))
    optimizer = Array('d', [-1, -1, 0, 0, 0, 0])
    car_npcs = Array('L', hparams['car_npcs'] if hparams else [settings.CAR_NPCS, settings.RESET_CAR_NPC_EVERY_N_TICKS])
    pause_agents = []
    for _ in range(settings.AGENTS):
        pause_agents.append(Value('B', 0))
    # Run Carla settings (weather, NPC control) in a separate thread
    carla_settings_threads = []
    carla_settings_stats = []
    carla_frametimes_list = []
    carla_fps_counters = []
    carla_fps = []
    # Group each agent's pause flag under the Carla instance it is assigned to
    agents_in_carla_instance = {}
    for process_no in range(settings.CARLA_HOSTS_NO):
        agents_in_carla_instance[process_no] = []
    for agent in range(settings.AGENTS):
        # Default to instance 1 when no assignment exists or it is out of range
        carla_instance = 1 if not len(settings.AGENT_CARLA_INSTANCE) or settings.AGENT_CARLA_INSTANCE[agent] > settings.CARLA_HOSTS_NO else settings.AGENT_CARLA_INSTANCE[agent]
        agents_in_carla_instance[carla_instance-1].append(pause_agents[agent])
    for process_no in range(settings.CARLA_HOSTS_NO):
        carla_settings_process_stats = Array('f', [-1, -1, -1, -1, -1, -1])
        carla_frametimes = Queue()
        carla_frametimes_list.append(carla_frametimes)
        # Rolling window of the last 60 frame times used for the FPS estimate
        carla_fps_counter = deque(maxlen=60)
        carla_fps.append(Value('f', 0))
        carla_fps_counters.append(carla_fps_counter)
        carla_settings_stats.append(carla_settings_process_stats)
        carla_settings = CarlaEnvSettings(process_no, agents_in_carla_instance[process_no], stop, car_npcs, carla_settings_process_stats)
        carla_settings_thread = Thread(target=carla_settings.update_settings_in_loop, daemon=True)
        carla_settings_thread.start()
        carla_settings_threads.append([carla_settings_thread, carla_settings])
    #Log dir creation (gif and action-value image dirs mirror the log dir name)
    logdir = "logs/{}-{}-{}".format('ddqn', settings.MODEL_NAME, int(time.time()))
    gifdir = logdir.replace('logs', 'gifs')
    action_val_dir = logdir.replace('logs', 'act_img')
    os.makedirs(gifdir, exist_ok=True)
    os.makedirs(action_val_dir, exist_ok=True)
    # Start trainer process
    print('Starting trainer...')
    trainer_process = Process(target=run_trainer, args=(hparams['model_path'] if hparams else False, logdir, stop, weights, weights_iteration, episode, epsilon, discount, update_target_every, last_target_update, min_reward, agent_show_preview, save_checkpoint_every, seconds_per_episode, duration, transitions, tensorboard_stats, trainer_stats, episode_stats, optimizer, hparams['models'] if hparams else [], car_npcs, carla_settings_stats, carla_fps), daemon=True)
    trainer_process.start()
    # Wait for trainer to be ready, it needs to, for example, dump weights that agents are going to update
    while trainer_stats[0] != TRAINER_STATE.waiting:
        time.sleep(0.01)
    # Start one new process for each agent
    print('Starting agents...')
    agents = []
    for agent in range(settings.AGENTS):
        # Same instance-assignment rule as above: default to instance 1 when unset/out of range
        carla_instance = 1 if not len(settings.AGENT_CARLA_INSTANCE) or settings.AGENT_CARLA_INSTANCE[agent] > settings.CARLA_HOSTS_NO else settings.AGENT_CARLA_INSTANCE[agent]
        p = Process(target=run_agent, args=(agent, gifdir, action_val_dir, carla_instance-1, stop, pause_agents[agent], episode, epsilon, agent_show_preview[agent], weights, weights_iteration, transitions, tensorboard_stats, agent_stats[agent], carla_frametimes_list[carla_instance-1], seconds_per_episode), daemon=True)
        p.start()
        agents.append(p)
    print('Ready')
    # Start printing stats to a console
    # NOTE(review): presumably reserves blank console lines for the in-place stats redraw — confirm against ConsoleStats.print
    print('\n'*(settings.AGENTS+22))
    console_stats = ConsoleStats(stop, duration, start_time, episode, epsilon, trainer_stats, agent_stats, episode_stats, carla_fps, weights_iteration, optimizer, carla_settings_threads, seconds_per_episode)
    console_stats_thread = Thread(target=console_stats.print, daemon=True)
    console_stats_thread.start()
    # Create commands' object
    commands = Commands(stop, epsilon, discount, update_target_every, min_reward, save_checkpoint_every, seconds_per_episode, agent_show_preview, optimizer, car_npcs)
    # Main loop
    while True:
        # If everything is running or carla broke...
        if stop.value in[STOP.running, STOP.carla_simulator_error, STOP.restarting_carla_simulator, STOP.carla_simulator_restarted]:
            # ...and any agent returns an error
            if any([state[0] == AGENT_STATE.error for state in agent_stats]):
                # If it's a running state, set it to carla error
                if stop.value == STOP.running:
                    stop.value = STOP.carla_simulator_error
                    for process_no in range(settings.CARLA_HOSTS_NO):
                        carla_fps_counters[process_no].clear()
            # If agents are not returning errors, set running state
            else:
                stop.value = STOP.running
                carla_check = None
        # Append new frametimes from carla for stats
        if not stop.value == STOP.carla_simulator_error:
            for process_no in range(settings.CARLA_HOSTS_NO):
                for _ in range(carla_frametimes_list[process_no].qsize()):
                    try:
                        carla_fps_counters[process_no].append(carla_frametimes_list[process_no].get(True, 0.1))
                    except:
                        # Best-effort drain; the queue may race qsize()
                        break
                # FPS = number of samples / total frame time over the rolling window
                carla_fps[process_no].value = len(carla_fps_counters[process_no]) / sum(carla_fps_counters[process_no]) if sum(carla_fps_counters[process_no]) > 0 else 0
        # If carla broke
        if stop.value == STOP.carla_simulator_error and settings.CARLA_HOSTS_TYPE == 'local':
            # First check, set a timer because...
            if carla_check is None:
                carla_check = time.time()
            # ...we give it 15 seconds to possibly recover, if not...
            if time.time() > carla_check + 15:
                # ... set Carla restart state and try to restart it
                stop.value = STOP.restarting_carla_simulator
                if settings.CARLA_HOSTS_TYPE == 'local':
                    kill_carla_processes()
                for process_no in range(settings.CARLA_HOSTS_NO):
                    carla_settings_threads[process_no][1].clean_carnpcs()
                    carla_settings_threads[process_no][1].restart = True
                    carla_fps_counters[process_no].clear()
                    carla_fps[process_no].value = 0
                # Wait until every settings thread acknowledges the restart state
                for process_no in range(settings.CARLA_HOSTS_NO):
                    while not carla_settings_threads[process_no][1].state == CARLA_SETTINGS_STATE.restarting:
                        time.sleep(0.1)
                restart_carla()
                for process_no in range(settings.CARLA_HOSTS_NO):
                    carla_settings_threads[process_no][1].restart = False
                stop.value = STOP.carla_simulator_restarted
        # When Carla restarts, give it up to 60 seconds, then try again if failed
        if stop.value == STOP.restarting_carla_simulator and time.time() > carla_check + 60:
            stop.value = STOP.carla_simulator_error
            # Back-date the timer so the 15-second retry window fires immediately
            carla_check = time.time() - 15
        # Process commands
        commands.process()
        # If stopping - cleanup and exit
        if stop.value == STOP.stopping:
            # Trainer process already "knows" that, just wait for it to exit
            trainer_process.join()
            # The same for all agents
            for agent in agents:
                agent.join()
            # ... and Carla settings
            for process_no in range(settings.CARLA_HOSTS_NO):
                carla_settings_threads[process_no][0].join()
            # Close Carla
            kill_carla_processes()
            stop.value = STOP.stopped
            time.sleep(1)
            break
        time.sleep(0.01)
|
thermal-sensor.py | from connection import *
import connection, threading, socket
from datetime import datetime
value = -1  # last known temperature; -1 means "no reading yet"
temperatureTimes = {}
# Build a time -> temperature-string map from the CSV schedule
# (column 1 is the time, column 2 the temperature or the literal "error").
# NOTE(review): opened with mode 'r+' although it is only read here —
# confirm write access is really required.
with open(SOURCE_FILE, mode='r+') as csv_file:
    for row in csv_file:
        temperatureTimes[row.split(',')[1]] = row.split(',')[2]
def parseTemp(text):
    """Extract the numeric temperature embedded in *text* as a float.

    Keeps digits, '.' and '-' characters and discards everything else.
    """
    kept = [ch for ch in text if ch.isnumeric() or ch in ".-"]
    return float("".join(kept))
def findInitialValue():
    """Seed the global `value` with the last valid reading at or before now.

    Scans the schedule in insertion order; once an entry earlier than the
    current time is found, keeps updating `value` until an entry later than
    the current time is reached.
    """
    global value
    foundFirst = False
    # NOTE(review): '%#H' (hour without leading zero) is Windows-only strftime
    # syntax; on POSIX the equivalent is '%-H' — confirm target platform.
    initialTime = datetime.strptime(datetime.now().strftime("%#H:%M:%S"),
                                    "%H:%M:%S")
    for time in temperatureTimes:
        if temperatureTimes[time] != "error":
            temp = datetime.strptime(time, "%H:%M:%S")
            if foundFirst:
                if (temp > initialTime):
                    break
                else:
                    value = parseTemp(temperatureTimes[time])
            else:
                # NOTE(review): the entry that flips foundFirst is not itself
                # captured into `value` — confirm this is intended.
                if temp < initialTime:
                    foundFirst = True
def getValue():
    """Daemon loop: refresh the global `value` from the schedule once per second."""
    global value
    while (True):
        try:
            time = datetime.now().strftime("%#H:%M:%S")
            newValue = temperatureTimes[time]
            if newValue != "error":
                value = parseTemp(newValue)
        except:
            # Bare except keeps the sensor loop alive on missing keys or
            # parse failures — deliberate best-effort behaviour.
            pass
        sleep(1)
def sendData():
    """Broadcast the current temperature reading as JSON over UDP every 5 s.

    Sends to the local broadcast address on port 5005 whenever a connection
    is established and a reading is available (value != -1).
    Fix: removed the stray `global i` declaration — `i` was never used.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    while True:
        if connection.CONNECTED:
            if value != -1:
                sock.sendto(
                    bytes(
                        "{\"SN\": \"" + SERIAL_NUMBER + "\",\"nickname\":\"" +
                        connection.NICKNAME +
                        "\",\"measure\": \"TEMP\", \"value\":" + str(value) +
                        "}", "utf-8"), ("255.255.255.255", 5005))
                print("{\"SN\": \"" + SERIAL_NUMBER +
                      "\",\"measure\": \"TEMP\", \"value\":" + str(value) +
                      "}")
        sleep(5)
# Seed the initial reading, then run the connection watcher, sender and
# reader threads; the main thread just idles.
findInitialValue()
checkConnection = threading.Thread(target=waitForConnection)
sender = threading.Thread(target=sendData)
getData = threading.Thread(target=getValue)
getData.start()
checkConnection.start()
sender.start()
while True:
    sleep(1)
|
test_flight.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import base64
import contextlib
import os
import socket
import struct
import tempfile
import threading
import time
import traceback
import pytest
import pyarrow as pa
from pyarrow.compat import tobytes
from pyarrow.util import pathlib
flight = pytest.importorskip("pyarrow.flight")
def resource_root():
    """Get the path to the test resources directory."""
    root = os.environ.get("ARROW_TEST_DATA")
    if not root:
        raise RuntimeError("Test resources not found; set "
                           "ARROW_TEST_DATA to <repo root>/testing")
    return pathlib.Path(root) / "flight"
def read_flight_resource(path):
    """Get the contents of a test resource file."""
    root = resource_root()
    if not root:
        return None
    resource = root / path
    try:
        with resource.open("rb") as f:
            return f.read()
    except FileNotFoundError:
        raise RuntimeError(
            "Test resource {} not found; did you initialize the "
            "test resource submodule?\n{}".format(resource,
                                                  traceback.format_exc()))
def example_tls_certs():
    """Get the paths to test TLS certificates."""
    certificates = []
    for cert_file, key_file in [("cert0.pem", "cert0.key"),
                                ("cert1.pem", "cert1.key")]:
        certificates.append(flight.CertKeyPair(
            cert=read_flight_resource(cert_file),
            key=read_flight_resource(key_file),
        ))
    return {
        "root_cert": read_flight_resource("root-ca.pem"),
        "certificates": certificates,
    }
def simple_ints_table():
    """Build a single-column table of example integers."""
    ints = pa.array([-10, -5, 0, 5, 10])
    return pa.Table.from_arrays([ints], names=['some_ints'])
def simple_dicts_table():
    """Build a single-column table of chunked dictionary-encoded strings."""
    dictionary = pa.array(["foo", "baz", "quux"], type=pa.utf8())
    chunk1 = pa.DictionaryArray.from_arrays([1, 0, None], dictionary)
    chunk2 = pa.DictionaryArray.from_arrays([2, 1], dictionary)
    column = pa.chunked_array([chunk1, chunk2])
    return pa.Table.from_arrays([column], names=['some_dicts'])
class ConstantFlightServer(flight.FlightServerBase):
    """A Flight server that always returns the same data.

    See ARROW-4796: this server implementation will segfault if Flight
    does not properly hold a reference to the Table object.
    """

    def __init__(self):
        super(ConstantFlightServer, self).__init__()
        # Maps a ticket to a zero-argument factory producing a fresh Table.
        self.table_factories = {
            b'ints': simple_ints_table,
            b'dicts': simple_dicts_table,
        }

    def do_get(self, context, ticket):
        # Build a brand-new table so that Flight holds the only reference.
        factory = self.table_factories[ticket.ticket]
        return flight.RecordBatchStream(factory())
class MetadataFlightServer(flight.FlightServerBase):
    """A Flight server that numbers incoming/outgoing data."""
    def do_get(self, context, ticket):
        """Stream a fixed one-column table, attaching the batch index as
        metadata to every record batch (see number_batches)."""
        data = [
            pa.array([-10, -5, 0, 5, 10])
        ]
        table = pa.Table.from_arrays(data, names=['a'])
        return flight.GeneratorStream(
            table.schema,
            self.number_batches(table))
    def do_put(self, context, descriptor, reader, writer):
        """Consume an upload batch-by-batch, checking each batch's value and
        its little-endian int32 metadata counter, and echo the counter back."""
        counter = 0
        expected_data = [-10, -5, 0, 5, 10]
        while True:
            try:
                batch, buf = reader.read_chunk()
                assert batch.equals(pa.RecordBatch.from_arrays(
                    [pa.array([expected_data[counter]])],
                    ['a']
                ))
                assert buf is not None
                # Metadata is a little-endian int32 written by the client.
                client_counter, = struct.unpack('<i', buf.to_pybytes())
                assert counter == client_counter
                # Echo the same counter back as server metadata.
                writer.write(struct.pack('<i', counter))
                counter += 1
            except StopIteration:
                # Client finished writing.
                return
    @staticmethod
    def number_batches(table):
        """Yield (batch, metadata) pairs where metadata is the packed index."""
        for idx, batch in enumerate(table.to_batches()):
            buf = struct.pack('<i', idx)
            yield batch, buf
class EchoFlightServer(flight.FlightServerBase):
    """A Flight server that returns the last data uploaded."""

    def __init__(self, expected_schema=None):
        super(EchoFlightServer, self).__init__()
        # Most recently uploaded table; replayed verbatim by do_get.
        self.last_message = None
        self.expected_schema = expected_schema

    def do_get(self, context, ticket):
        return flight.RecordBatchStream(self.last_message)

    def do_put(self, context, descriptor, reader, writer):
        if self.expected_schema:
            assert reader.schema == self.expected_schema
        self.last_message = reader.read_all()
class EchoStreamFlightServer(EchoFlightServer):
    """An echo server that streams individual record batches."""

    def do_get(self, context, ticket):
        batches = self.last_message.to_batches(chunksize=1024)
        return flight.GeneratorStream(self.last_message.schema, batches)

    def list_actions(self, context):
        return []

    def do_action(self, context, action):
        if action.type != "who-am-i":
            raise NotImplementedError
        return iter([flight.Result(context.peer_identity())])
class GetInfoFlightServer(flight.FlightServerBase):
    """A Flight server that tests GetFlightInfo."""
    def get_flight_info(self, context, descriptor):
        """Return a fixed FlightInfo with one string-URI endpoint and one
        Location-object endpoint, and unknown (-1) record/byte totals."""
        return flight.FlightInfo(
            pa.schema([('a', pa.int32())]),
            descriptor,
            [
                flight.FlightEndpoint(b'', ['grpc://test']),
                flight.FlightEndpoint(
                    b'',
                    [flight.Location.for_grpc_tcp('localhost', 5005)],
                ),
            ],
            -1,  # total_records unknown
            -1,  # total_bytes unknown
        )
class CheckTicketFlightServer(flight.FlightServerBase):
    """A Flight server that compares the given ticket to an expected value."""

    def __init__(self, expected_ticket):
        """Args: expected_ticket (bytes): ticket every do_get must carry."""
        super(CheckTicketFlightServer, self).__init__()
        self.expected_ticket = expected_ticket

    def do_get(self, context, ticket):
        # Fail loudly if the client did not round-trip the expected ticket.
        assert self.expected_ticket == ticket.ticket
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        table = pa.Table.from_arrays(data1, names=['a'])
        return flight.RecordBatchStream(table)

    def do_put(self, context, descriptor, reader, writer):
        # Accept `writer` like every other do_put override in this file so
        # the Flight framework can invoke all servers with the same signature.
        self.last_message = reader.read_all()
class InvalidStreamFlightServer(flight.FlightServerBase):
    """A Flight server that tries to return messages with differing schemas."""

    schema = pa.schema([('a', pa.int32())])

    def do_get(self, context, ticket):
        data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        data2 = [pa.array([-10.0, -5.0, 0.0, 5.0, 10.0], type=pa.float64())]
        # Compare the array dtypes; data1/data2 are plain lists, which have
        # no `.type` attribute (the original raised AttributeError here).
        assert data1[0].type != data2[0].type
        table1 = pa.Table.from_arrays(data1, names=['a'])
        table2 = pa.Table.from_arrays(data2, names=['a'])
        assert table1.schema == self.schema
        # Deliberately stream a second table whose schema mismatches.
        return flight.GeneratorStream(self.schema, [table1, table2])
class SlowFlightServer(flight.FlightServerBase):
    """A Flight server that delays its responses to test timeouts."""

    def do_get(self, context, ticket):
        schema = pa.schema([('a', pa.int32())])
        return flight.GeneratorStream(schema, self.slow_stream())

    def do_action(self, context, action):
        time.sleep(0.5)
        return iter([])

    @staticmethod
    def slow_stream():
        values = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
        yield pa.Table.from_arrays(values, names=['a'])
        # The second message should never get sent; the client should
        # cancel before we send this
        time.sleep(10)
        yield pa.Table.from_arrays(values, names=['a'])
class HttpBasicServerAuthHandler(flight.ServerAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, creds):
        super().__init__()
        # Mapping of username (bytes) -> password (bytes).
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        pass

    def is_valid(self, token):
        if not token:
            raise ValueError("unauthenticated: token not provided")
        username, password = base64.b64decode(token).split(b':')
        if username not in self.creds:
            raise ValueError("unknown user")
        if self.creds[username] != password:
            raise ValueError("wrong password")
        return username
class HttpBasicClientAuthHandler(flight.ClientAuthHandler):
    """An example implementation of HTTP basic authentication."""

    def __init__(self, username, password):
        super().__init__()
        # Stored as bytes; joined into "user:pass" for the basic-auth token.
        self.username = tobytes(username)
        self.password = tobytes(password)

    def authenticate(self, outgoing, incoming):
        pass

    def get_token(self):
        credentials = self.username + b':' + self.password
        return base64.b64encode(credentials)
class TokenServerAuthHandler(flight.ServerAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, creds):
        super().__init__()
        # Mapping of username (bytes) -> password (bytes).
        self.creds = creds

    def authenticate(self, outgoing, incoming):
        username = incoming.read()
        password = incoming.read()
        if username not in self.creds or self.creds[username] != password:
            raise ValueError("unauthenticated: invalid username/password")
        outgoing.write(base64.b64encode(b'secret:' + username))

    def is_valid(self, token):
        decoded = base64.b64decode(token)
        if not decoded.startswith(b'secret:'):
            raise ValueError("unauthenticated: invalid token")
        return decoded[7:]
class TokenClientAuthHandler(flight.ClientAuthHandler):
    """An example implementation of authentication via handshake."""

    def __init__(self, username, password):
        super().__init__()
        self.username = username
        self.password = password
        # Server-issued token, populated during authenticate().
        self.token = b''

    def authenticate(self, outgoing, incoming):
        for secret in (self.username, self.password):
            outgoing.write(secret)
        self.token = incoming.read()

    def get_token(self):
        return self.token
@contextlib.contextmanager
def flight_server(server_base, *args, **kwargs):
    """Spawn a Flight server on a free port, shutting it down when done.

    Keyword extras (popped before constructing the server):
        auth_handler, tls_certificates, location, try_connect, connect_args.
    Remaining args/kwargs are forwarded to the server constructor.
    Yields the server's Location.
    """
    auth_handler = kwargs.pop('auth_handler', None)
    tls_certificates = kwargs.pop('tls_certificates', None)
    location = kwargs.pop('location', None)
    try_connect = kwargs.pop('try_connect', True)
    connect_args = kwargs.pop('connect_args', {})
    if location is None:
        # Find a free port
        # NOTE(review): the port is released before the server binds it, so
        # another process could grab it in between (inherent TOCTOU race).
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with contextlib.closing(sock) as sock:
            # SO_REUSEADDR must be set before bind() to take effect.
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', 0))
            port = sock.getsockname()[1]
        ctor = flight.Location.for_grpc_tcp
        if tls_certificates:
            ctor = flight.Location.for_grpc_tls
        location = ctor("localhost", port)
    else:
        port = None
    ctor_kwargs = kwargs
    server_instance = server_base(*args, **ctor_kwargs)

    def _server_thread():
        server_instance.run(
            location,
            auth_handler=auth_handler,
            tls_certificates=tls_certificates,
        )

    thread = threading.Thread(target=_server_thread, daemon=True)
    thread.start()
    # Wait for server to start
    if try_connect:
        deadline = time.time() + 5.0
        client = flight.FlightClient.connect(location, **connect_args)
        while True:
            try:
                list(client.list_flights())
            except Exception as e:
                # Retry only while the failure looks like "server not up
                # yet"; any other error means the server is reachable (e.g.
                # list_flights unimplemented) and we stop waiting.
                if 'Connect Failed' in str(e):
                    if time.time() < deadline:
                        time.sleep(0.025)
                        continue
                    else:
                        raise
            break
    try:
        yield location
    finally:
        server_instance.shutdown()
        thread.join(3.0)
def test_flight_do_get_ints():
    """Try a simple do_get call."""
    table = simple_ints_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        # The server returns the same constant table the helper builds.
        assert data.equals(table)
@pytest.mark.pandas
def test_do_get_ints_pandas():
    """Try a simple do_get call, reading the result via pandas."""
    table = simple_ints_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        data = client.do_get(flight.Ticket(b'ints')).read_pandas()
        # read_pandas() yields a DataFrame; compare column contents.
        assert list(data['some_ints']) == table.column(0).to_pylist()
def test_flight_do_get_dicts():
    """do_get should round-trip dictionary-encoded columns."""
    table = simple_dicts_table()
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        data = client.do_get(flight.Ticket(b'dicts')).read_all()
        assert data.equals(table)
def test_flight_do_get_ticket():
    """Make sure Tickets get passed to the server."""
    data1 = [pa.array([-10, -5, 0, 5, 10], type=pa.int32())]
    table = pa.Table.from_arrays(data1, names=['a'])
    with flight_server(
            CheckTicketFlightServer,
            expected_ticket=b'the-ticket',
    ) as server_location:
        client = flight.FlightClient.connect(server_location)
        # The server asserts it received exactly b'the-ticket'.
        data = client.do_get(flight.Ticket(b'the-ticket')).read_all()
        assert data.equals(table)
def test_flight_get_info():
    """Make sure FlightEndpoint accepts string and object URIs."""
    with flight_server(GetInfoFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        info = client.get_flight_info(flight.FlightDescriptor.for_command(b''))
        # -1 means "unknown" for both totals.
        assert info.total_records == -1
        assert info.total_bytes == -1
        assert info.schema == pa.schema([('a', pa.int32())])
        assert len(info.endpoints) == 2
        assert len(info.endpoints[0].locations) == 1
        assert info.endpoints[0].locations[0] == flight.Location('grpc://test')
        assert info.endpoints[1].locations[0] == \
            flight.Location.for_grpc_tcp('localhost', 5005)
@pytest.mark.skipif(os.name == 'nt',
                    reason="Unix sockets can't be tested on Windows")
def test_flight_domain_socket():
    """Try a simple do_get call over a Unix domain socket."""
    with tempfile.NamedTemporaryFile() as sock:
        # Only the reserved filesystem path is needed, not the open file.
        sock.close()
        location = flight.Location.for_grpc_unix(sock.name)
        with flight_server(ConstantFlightServer,
                           location=location) as server_location:
            client = flight.FlightClient.connect(server_location)
            reader = client.do_get(flight.Ticket(b'ints'))
            table = simple_ints_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)
            reader = client.do_get(flight.Ticket(b'dicts'))
            table = simple_dicts_table()
            assert reader.schema.equals(table.schema)
            data = reader.read_all()
            assert data.equals(table)
@pytest.mark.slow
def test_flight_large_message():
    """Try sending/receiving a large message via Flight.
    See ARROW-4421: by default, gRPC won't allow us to send messages >
    4MiB in size.
    """
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024 * 1024))
    ], names=['a'])
    with flight_server(EchoFlightServer,
                       expected_schema=data.schema) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        # Write a single giant chunk
        writer.write_table(data, 10 * 1024 * 1024)
        writer.close()
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)
def test_flight_generator_stream():
    """Try downloading a flight of RecordBatches in a GeneratorStream."""
    data = pa.Table.from_arrays([
        pa.array(range(0, 10 * 1024))
    ], names=['a'])
    with flight_server(EchoStreamFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, _ = client.do_put(flight.FlightDescriptor.for_path('test'),
                                  data.schema)
        writer.write_table(data)
        writer.close()
        # The echo server streams back exactly what was uploaded.
        result = client.do_get(flight.Ticket(b'')).read_all()
        assert result.equals(data)
def test_flight_invalid_generator_stream():
    """Try streaming data with mismatched schemas."""
    with flight_server(InvalidStreamFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        with pytest.raises(pa.ArrowException):
            client.do_get(flight.Ticket(b'')).read_all()
def test_timeout_fires():
    """Make sure timeouts fire on slow requests."""
    # Do this in a separate thread so that if it fails, we don't hang
    # the entire test process
    with flight_server(SlowFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("", b"")
        options = flight.FlightCallOptions(timeout=0.2)
        # gRPC error messages change based on version, so don't look
        # for a particular error
        with pytest.raises(pa.ArrowIOError):
            list(client.do_action(action, options=options))
def test_timeout_passes():
    """Make sure timeouts do not fire on fast requests."""
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        options = flight.FlightCallOptions(timeout=5.0)
        # Must complete without raising within the generous deadline.
        client.do_get(flight.Ticket(b'ints'), options=options).read_all()
# Shared server-side auth handlers used by the authentication tests below.
basic_auth_handler = HttpBasicServerAuthHandler(creds={
    b"test": b"p4ssw0rd",
})
token_auth_handler = TokenServerAuthHandler(creds={
    b"test": b"p4ssw0rd",
})
@pytest.mark.slow
def test_http_basic_unauth():
    """Test that auth fails when not authenticated."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        # No client.authenticate() call, so the server must reject us.
        with pytest.raises(pa.ArrowException, match=".*unauthenticated.*"):
            list(client.do_action(action))
def test_http_basic_auth():
    """Test a Python implementation of HTTP basic authentication."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        client.authenticate(HttpBasicClientAuthHandler('test', 'p4ssw0rd'))
        # The server echoes back the authenticated identity.
        identity = next(client.do_action(action))
        assert identity.body.to_pybytes() == b'test'
def test_http_basic_auth_invalid_password():
    """Test that auth fails with the wrong password."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=basic_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        client.authenticate(HttpBasicClientAuthHandler('test', 'wrong'))
        with pytest.raises(pa.ArrowException, match=".*wrong password.*"):
            next(client.do_action(action))
def test_token_auth():
    """Test an auth mechanism that uses a handshake."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=token_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        action = flight.Action("who-am-i", b"")
        client.authenticate(TokenClientAuthHandler('test', 'p4ssw0rd'))
        identity = next(client.do_action(action))
        assert identity.body.to_pybytes() == b'test'
def test_token_auth_invalid():
    """Test an auth mechanism that uses a handshake."""
    with flight_server(EchoStreamFlightServer,
                       auth_handler=token_auth_handler) as server_location:
        client = flight.FlightClient.connect(server_location)
        with pytest.raises(pa.ArrowException, match=".*unauthenticated.*"):
            client.authenticate(TokenClientAuthHandler('test', 'wrong'))
def test_location_invalid():
    """Test constructing invalid URIs."""
    with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
        flight.FlightClient.connect("%")
    server = ConstantFlightServer()
    with pytest.raises(pa.ArrowInvalid, match=".*Cannot parse URI:.*"):
        server.run("%")
@pytest.mark.slow
@pytest.mark.requires_testing_data
def test_tls_fails():
    """Make sure clients cannot connect when cert verification fails."""
    certs = example_tls_certs()
    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        # Ensure client doesn't connect when certificate verification
        # fails (this is a slow test since gRPC does retry a few times)
        # Note: no tls_root_certs supplied here, unlike the probe client.
        client = flight.FlightClient.connect(server_location)
        # gRPC error messages change based on version, so don't look
        # for a particular error
        with pytest.raises(pa.ArrowIOError):
            client.do_get(flight.Ticket(b'ints'))
@pytest.mark.requires_testing_data
def test_tls_do_get():
    """Try a simple do_get call over TLS."""
    table = simple_ints_table()
    certs = example_tls_certs()
    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        client = flight.FlightClient.connect(
            server_location, tls_root_certs=certs["root_cert"])
        data = client.do_get(flight.Ticket(b'ints')).read_all()
        assert data.equals(table)
@pytest.mark.requires_testing_data
def test_tls_override_hostname():
    """Check that incorrectly overriding the hostname fails."""
    certs = example_tls_certs()
    with flight_server(
            ConstantFlightServer, tls_certificates=certs["certificates"],
            connect_args=dict(tls_root_certs=certs["root_cert"]),
    ) as server_location:
        client = flight.FlightClient.connect(
            server_location, tls_root_certs=certs["root_cert"],
            override_hostname="fakehostname")
        # Hostname does not match the certificate, so calls must fail.
        with pytest.raises(pa.ArrowIOError):
            client.do_get(flight.Ticket(b'ints'))
def test_flight_do_get_metadata():
    """Try a simple do_get call with metadata."""
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])
    batches = []
    with flight_server(MetadataFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b''))
        idx = 0
        while True:
            try:
                batch, metadata = reader.read_chunk()
                batches.append(batch)
                # The server attaches its batch index as little-endian
                # int32 app metadata; it must match our own counter.
                server_idx, = struct.unpack('<i', metadata.to_pybytes())
                assert idx == server_idx
                idx += 1
            except StopIteration:
                break
        data = pa.Table.from_batches(batches)
        assert data.equals(table)
def test_flight_do_put_metadata():
    """Try a simple do_put call with metadata."""
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])
    with flight_server(MetadataFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        writer, metadata_reader = client.do_put(
            flight.FlightDescriptor.for_path(''),
            table.schema)
        with writer:
            for idx, batch in enumerate(table.to_batches(chunksize=1)):
                metadata = struct.pack('<i', idx)
                writer.write_with_metadata(batch, metadata)
                # The server acknowledges each batch by echoing its index.
                buf = metadata_reader.read()
                assert buf is not None
                server_idx, = struct.unpack('<i', buf.to_pybytes())
                assert idx == server_idx
@pytest.mark.slow
def test_cancel_do_get():
    """Test canceling a DoGet operation on the client side."""
    with flight_server(ConstantFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b'ints'))
        reader.cancel()
        # Reading after cancel must surface a cancellation error.
        with pytest.raises(pa.ArrowIOError, match=".*Cancel.*"):
            reader.read_chunk()
@pytest.mark.slow
def test_cancel_do_get_threaded():
    """Test canceling a DoGet operation from another thread."""
    with flight_server(SlowFlightServer) as server_location:
        client = flight.FlightClient.connect(server_location)
        reader = client.do_get(flight.Ticket(b'ints'))
        # Events coordinate the reader thread with the canceling
        # (main) thread below.
        read_first_message = threading.Event()
        stream_canceled = threading.Event()
        result_lock = threading.Lock()
        raised_proper_exception = threading.Event()
        def block_read():
            # First read succeeds; the second happens after cancel and
            # must raise ArrowIOError.
            reader.read_chunk()
            read_first_message.set()
            stream_canceled.wait(timeout=5)
            try:
                reader.read_chunk()
            except pa.ArrowIOError:
                with result_lock:
                    raised_proper_exception.set()
        thread = threading.Thread(target=block_read, daemon=True)
        thread.start()
        read_first_message.wait(timeout=5)
        reader.cancel()
        stream_canceled.set()
        thread.join(timeout=1)
        with result_lock:
            assert raised_proper_exception.is_set()
|
mynewsplease.py | import os
import sys
import orjson
from loky import cpu_count
import logging
from w3lib.url import canonicalize_url
from newsplease.crawler import commoncrawl_crawler
from newsplease.crawler import commoncrawl_extractor
from multiprocessing import Queue, Pool, Process, active_children
import queue
FINISHED_PRODUCING = None
LINE_CHUNK_SIZE = 1
MAP_CHUNK_SIZE = 256
QUEUE_SIZE_PER_PROCESS = 16
GET_TIMEOUT = 60 * 60 # 1 hour
def die_all():
    """Shut down all child processes and exit the interpreter with -1.

    Children first receive SIGTERM (``terminate``) so they can exit
    gracefully; any that survive the grace period are escalated to
    SIGKILL (``kill``).  The original code did this in the wrong order
    (kill first, then terminate), which made the second pass a no-op.
    """
    import time
    children = active_children()
    for child in children:
        child.terminate()
    if children:
        # Give terminated children time to exit before escalating.
        time.sleep(5)
        for child in active_children():
            child.kill()
    sys.exit(-1)
class ChunkingQueue:
    """Batches items locally and ships them through one shared
    multiprocessing Queue in chunks of ``chunk_size``.

    The global queue is class-level so that worker processes created by
    a Pool share the same underlying queue object (it must be created
    before the workers fork/spawn).
    """
    # Shared multiprocessing.Queue; set once via init_global_queue().
    _global_queue = None
    @classmethod
    def init_global_queue(cls, queue_size):
        # Bounded queue: producers block once queue_size chunks are pending.
        cls._global_queue = Queue(queue_size)
    def __init__(self, chunk_size, num_producers):
        self._local_queue = []
        self.chunk_size = chunk_size
        # NOTE(review): num_producers is stored but never used below —
        # presumably intended for counting sentinels; confirm.
        self.num_producers = num_producers
    def put(self, item):
        # Accumulate locally; flush a full chunk to the shared queue.
        self._local_queue.append(item)
        if len(self._local_queue) == self.chunk_size:
            self._global_queue.put(self._local_queue)
            self._local_queue = []
    def get(self):
        # Blocks up to GET_TIMEOUT; on timeout the whole process tree is
        # torn down via die_all() (which never returns).
        try:
            result = self._global_queue.get(True, GET_TIMEOUT)
        except queue.Empty:
            print("Timeout out waiting for lines", file=sys.stderr)
            die_all()
        return result
    def item_done(self):
        # Flush any partial chunk (e.g. at the end of a WARC file).
        if self._local_queue:
            self._global_queue.put(self._local_queue)
            self._local_queue = []
    def all_producers_done(self):
        # FINISHED_PRODUCING (None) is the consumer's stop sentinel.
        self._global_queue.put(FINISHED_PRODUCING)
def quiet_mode():
    """Silence scrapy and the noisiest third-party loggers."""
    from scrapy.utils.log import configure_logging
    configure_logging({"LOG_LEVEL": "ERROR"})
    noisy_loggers = (
        'requests',
        'readability',
        'PIL',
        'newspaper',
        'newsplease',
        'urllib3',
        'jieba',
    )
    for logger_name in noisy_loggers:
        logging.getLogger(logger_name).setLevel(logging.CRITICAL)
def get_download_urls(warc_files_start_date):
    """Return download URLs for all CC-NEWS WARC files since the date.

    Also records the total WARC count on the crawler module, which the
    extractor library reads for progress reporting.
    """
    crawl_names = commoncrawl_crawler.__get_remote_index(warc_files_start_date)
    commoncrawl_crawler.__number_of_warc_files_on_cc = len(crawl_names)
    return [
        commoncrawl_crawler.__get_download_url(name)
        for name in crawl_names
    ]
class CommonCrawlProcessor:
    """Drives CC-NEWS extraction across a process pool.

    A child process runs ``crawl_urls`` (which fans the WARC URLs out to
    a Pool of extractor workers); the parent consumes extracted articles
    from the shared ChunkingQueue and prints them as JSON lines.
    """
    def __init__(
        self,
        callback_on_warc_completed=None,
        valid_hosts=None,
        start_date=None,
        end_date=None,
        warc_files_start_date=None,
        strict_date=True,
        reuse_previously_downloaded_files=True,
        local_download_dir_warc=None,
        continue_after_error=True,
        ignore_unicode_errors=True,
        show_download_progress=False,
        number_of_extraction_processes=4,
        log_level=logging.ERROR,
        delete_warc_after_extraction=True,
        extractor_cls=commoncrawl_extractor.CommonCrawlExtractor,
    ):
        # NOTE(review): the callback_on_warc_completed parameter is
        # accepted but never stored — the method of the same name below
        # is used instead; confirm the parameter can be dropped.
        self.queue = ChunkingQueue(
            LINE_CHUNK_SIZE,
            number_of_extraction_processes
        )
        self.valid_hosts = valid_hosts
        self.start_date = start_date
        self.end_date = end_date
        self.warc_files_start_date = warc_files_start_date
        self.strict_date = strict_date
        self.reuse_previously_downloaded_files = reuse_previously_downloaded_files
        self.local_download_dir_warc = local_download_dir_warc
        self.continue_after_error = continue_after_error
        self.ignore_unicode_errors = ignore_unicode_errors
        self.show_download_progress = show_download_progress
        self.number_of_extraction_processes = number_of_extraction_processes
        self.log_level = log_level
        self.delete_warc_after_extraction = delete_warc_after_extraction
        self.extractor_cls = extractor_cls
    def on_valid_article_extracted(self, article):
        """Serialize an extracted article and enqueue it for printing."""
        article_dict = article.get_dict()
        article_dict["country"] = article.country
        article_dict["canon_url"] = canonicalize_url(article_dict["url"])
        self.queue.put(orjson.dumps(article_dict))
    def callback_on_warc_completed(
        self, warc_path, counter_article_passed, counter_article_discarded,
        counter_article_error, counter_article_total,
    ):
        """Flush the partial chunk for this WARC and log per-WARC stats."""
        self.queue.item_done()
        print(
            (
                f"Passed: {counter_article_passed}\t"
                f"Discarded: {counter_article_discarded}\t"
                f"Error: {counter_article_error}\tTotal: {counter_article_total}\t"
            ),
            file=sys.stderr,
            flush=True
        )
    def crawl_urls(self, warc_download_urls):
        """Process WARC URLs in a worker Pool, then signal completion.

        Runs in a child process (see ``crawl``); ``self`` is the map
        callable so each worker invokes ``__call__`` per URL.
        """
        quiet_mode()
        with Pool(self.number_of_extraction_processes) as pool:
            for _ in pool.imap_unordered(
                self,
                warc_download_urls,
                chunksize=MAP_CHUNK_SIZE
            ):
                pass
        # All workers finished: wake the consumer with the sentinel.
        self.queue.all_producers_done()
    def crawl(self):
        """Start the producer process and print its output until done."""
        warc_download_urls = get_download_urls(self.warc_files_start_date)
        print("warc_download_urls", file=sys.stderr)
        for warc_url in warc_download_urls:
            print(warc_url, file=sys.stderr)
        process = Process(target=self.crawl_urls, args=(warc_download_urls,))
        process.start()
        self.print_from_queue()
        process.join()
    def print_from_queue(self):
        """Consume line chunks from the queue and write them to stdout."""
        while 1:
            lines = self.queue.get()
            if lines is FINISHED_PRODUCING:
                return
            else:
                for line in lines:
                    sys.stdout.buffer.write(line)
                    sys.stdout.buffer.write(b"\n")
                sys.stdout.buffer.flush()
    def __call__(self, warc_download_url):
        """Worker entry point: extract one WARC file's articles."""
        quiet_mode()
        # Local name shadows the imported module of the same name;
        # intentional here since the module is no longer needed.
        commoncrawl_extractor = self.extractor_cls()
        commoncrawl_extractor.extract_from_commoncrawl(
            warc_download_url,
            self.on_valid_article_extracted,
            callback_on_warc_completed=self.callback_on_warc_completed,
            valid_hosts=self.valid_hosts,
            start_date=self.start_date, end_date=self.end_date,
            strict_date=self.strict_date,
            reuse_previously_downloaded_files=self.reuse_previously_downloaded_files,
            local_download_dir_warc=self.local_download_dir_warc,
            continue_after_error=self.continue_after_error,
            ignore_unicode_errors=self.ignore_unicode_errors,
            show_download_progress=self.show_download_progress,
            log_level=self.log_level,
            delete_warc_after_extraction=self.delete_warc_after_extraction
        )
def default_cpus():
    """Return the worker count: NEWS_PLEASE_WORKERS env var, else all CPUs."""
    workers = os.environ.get("NEWS_PLEASE_WORKERS")
    return int(workers) if workers is not None else cpu_count()
def mynewsplease(**kwargs):
    """Crawl CC-NEWS and print extracted articles as JSON lines.

    ``warc_files_start_date`` falls back to ``start_date`` when absent.
    Pool sizing and unicode handling are popped out of ``kwargs``; the
    rest is forwarded to CommonCrawlProcessor.
    """
    kwargs["warc_files_start_date"] = kwargs.get(
        "warc_files_start_date", kwargs.get("start_date"))
    number_of_extraction_processes = kwargs.pop(
        "number_of_extraction_processes", default_cpus())
    # The shared queue must exist before any worker process is created.
    ChunkingQueue.init_global_queue(
        number_of_extraction_processes * QUEUE_SIZE_PER_PROCESS)
    ignore_unicode_errors = kwargs.pop("ignore_unicode_errors", True)
    quiet_mode()
    processor = CommonCrawlProcessor(
        number_of_extraction_processes=number_of_extraction_processes,
        ignore_unicode_errors=ignore_unicode_errors,
        **kwargs
    )
    processor.crawl()
|
cb2_9_4_sol_1.py | import threading, Queue, time, sys
# Globals (start with a capital letter)
Qin = Queue.Queue()    # work requests: (command, data) pairs
Qout = Queue.Queue()   # results of successful work items
Qerr = Queue.Queue()   # (exception-type, value) pairs from failed items
Pool = []              # the worker Thread objects
def report_error():
    ''' we "report" errors by adding error information to Qerr '''
    Qerr.put(sys.exc_info()[:2])
def get_all_from_queue(Q):
    ''' generator to yield one after the others all items currently
        in the Queue Q, without any waiting
    '''
    try:
        while True:
            yield Q.get_nowait()
    except Queue.Empty:
        # A plain return ends the generator cleanly.  The original
        # ``raise StopIteration`` is converted into RuntimeError by
        # PEP 479 on Python 3.7+ (and the explicit raise was redundant
        # even on Python 2).
        return
def do_work_from_queue():
    ''' the get-some-work, do-some-work main loop of worker threads

        Pulls (command, item) pairs from Qin until a 'stop' command
        arrives.  Results go to Qout, errors (as exc_info pairs) to
        Qerr.  NOTE: this file uses Python 2 syntax (raise with comma).
    '''
    while True:
        command, item = Qin.get()           # implicitly stops and waits
        if command == 'stop':
            break
        try:
            # simulated work functionality of a worker thread
            if command == 'process':
                result = 'new' + item
            else:
                raise ValueError, 'Unknown command %r' % command
        except:
            # unconditional except is right, since we report _all_ errors
            report_error()
        else:
            Qout.put(result)
def make_and_start_thread_pool(number_of_threads_in_pool=5, daemons=True):
    ''' make a pool of N worker threads, daemonize, and start all of them '''
    for i in range(number_of_threads_in_pool):
        new_thread = threading.Thread(target=do_work_from_queue)
        # setDaemon is the historical (Python 2 era) spelling of the
        # .daemon attribute.
        new_thread.setDaemon(daemons)
        Pool.append(new_thread)
        new_thread.start()
def request_work(data, command='process'):
    ''' work requests are posted as (command, data) pairs to Qin '''
    Qin.put((command, data))
def get_result():
    ''' fetch one result, blocking until a worker produces it '''
    return Qout.get()                       # implicitly stops and waits
def show_all_results():
    ''' drain Qout, printing every result currently available '''
    for result in get_all_from_queue(Qout):
        print 'Result:', result
def show_all_errors():
    ''' drain Qerr, printing every reported (type, value) pair '''
    for etyp, err in get_all_from_queue(Qerr):
        print 'Error:', etyp, err
def stop_and_free_thread_pool():
    ''' shut down the pool: one 'stop' request per thread, then join '''
    # order is important: first, request all threads to stop...:
    for i in range(len(Pool)):
        request_work(None, 'stop')
    # ...then, wait for each of them to terminate:
    for existing_thread in Pool:
        existing_thread.join()
    # clean up the pool from now-unused thread objects
    del Pool[:]
|
service.py | # encoding: utf-8
# FastCGI-to-WSGI bridge for files/pipes transport (not socket)
#
# Copyright (c) 2012 Openance SARL
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
import win32serviceutil
import os, os.path, sys, platform
from os.path import abspath, dirname
import win32service
import win32event
import win32con
import win32file
from multiprocessing import Process
from multiprocessing.util import get_logger
import ctypes
import traceback
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
# If using virtualenv under Windows, the sys.exec_prefix used in forking is
# set to the base directory of the virtual environment, not the Scripts
# subdirectory where the python executable resides.
if hasattr(sys, 'real_prefix') and sys.platform == 'win32':
    sys.exec_prefix = os.path.join(sys.exec_prefix, 'Scripts')
GenerateConsoleCtrlEvent = ctypes.windll.kernel32.GenerateConsoleCtrlEvent
# multiprocessing.forking was replaced by multiprocessing.spawn in
# Python 3.4; the preparation-data key changed name at the same time.
try:
    import multiprocessing.forking as forking
    main_path_key = 'main_path'
except ImportError:
    import multiprocessing.spawn as forking
    main_path_key = 'init_main_from_path'
# Monkey patch the Windows Process implementation to avoid thinking
# that 'PythonService.exe' is a python script
old_get_preparation_data = forking.get_preparation_data
def new_get_preparation_data(name):
    # Strip the .exe "main path" so child processes do not try to
    # re-execute the service host binary as a script.
    d = old_get_preparation_data(name)
    if main_path_key in d and d[main_path_key].lower().endswith('.exe'):
        del d[main_path_key]
    return d
forking.get_preparation_data = new_get_preparation_data
# Do the same monkey patching on billiard which is a fork of
# multiprocessing
# NOTE(review): when billiard.forking imports successfully (old
# billiard), only main_path_key is set and no patch is installed —
# confirm whether that branch also needs the get_preparation_data wrap.
try:
    import billiard.forking as billiard_forking
    main_path_key = 'main_path'
except ImportError:
    try:
        import billiard.spawn as billiard_forking
        main_path_key = 'init_main_from_path'
        billiard_old_get_preparation_data = billiard_forking.get_preparation_data
        def billiard_new_get_preparation_data(name):
            # Same idea as new_get_preparation_data above, but instead of
            # deleting the key, point it at a harmless script name.
            d = billiard_old_get_preparation_data(name)
            if main_path_key in d and d[main_path_key].lower().endswith('.exe'):
                # del d['main_path']
                d[main_path_key] = '__main__.py'
            return d
        billiard_forking.get_preparation_data = billiard_new_get_preparation_data
    except:
        # billiard is optional; ignore it entirely when unavailable.
        pass
def log(msg):
    '''Log a message in the Event Viewer as an informational message'''
    # Imported lazily: servicemanager only exists inside the pywin32
    # service host process.
    import servicemanager
    servicemanager.LogInfoMsg(str(msg))
def error(msg):
    '''Log a message in the Event Viewer as an error message'''
    import servicemanager
    servicemanager.LogErrorMsg(str(msg))
def initialize_logger(config):
    '''Configure the multiprocessing logger from the [log] config
    section (format / filename / level) and redirect stderr into it.
    '''
    class StdErrWrapper:
        """
        Call wrapper for stderr
        """
        def write(self, s):
            get_logger().info(s)
    import logging
    logger = get_logger()
    # Defaults; any key in the [log] section overrides them.
    values = dict(
        format='[%(levelname)s/%(processName)s] %(message)s',
        filename=None,
        level='INFO',
    )
    if config and config.has_section('log'):
        for (name, value) in config.items('log'):
            values[name] = value
    # Only attach a file handler when a filename was configured.
    if values['filename']:
        formatter = logging.Formatter(values['format'])
        handler = logging.FileHandler(values['filename'])
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    # Unknown level names silently fall back to INFO.
    logger.setLevel(getattr(logging, values['level'].upper(), logging.INFO))
    sys.stderr = StdErrWrapper()
def start_django_command(config, args):
    '''
    Start a Django management command.

    This command is supposed to run in a spawned child process: the
    logger is configured first so output is captured, then the command
    line is handed to Django.  Errors are logged to the Event Viewer.
    '''
    initialize_logger(config)
    log('Starting command : %s' % ' '.join(args))
    get_logger().info('Starting command : %s' % ' '.join(args))
    from django.core.management import execute_from_command_line
    try:
        execute_from_command_line(args)
    except Exception:
        # Fix: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt; catch only genuine errors and log them.
        error('Exception occured : %s' % traceback.format_exc())
def spawn_command(config, server_name):
    '''
    Spawn the command configured for ``server_name`` in a child process
    and return the multiprocessing Process object.
    '''
    # argv[0] is this script so the child looks like a normal invocation.
    script = getattr(sys.modules['__main__'], '__file__', __file__)
    command_line = [script, config.get(server_name, 'command')]
    command_line.extend(config.get(server_name, 'parameters').split())
    process = Process(target=start_django_command, args=(config, command_line,))
    process.start()
    log('Spawned %s' % ' '.join(command_line))
    return process
def start_commands(config):
    '''
    Spawn every command listed in the configuration and return the
    resulting process objects.
    '''
    node_name = platform.node()
    # A section named after this machine overrides the generic
    # [services] section.
    if config.has_section(node_name):
        services = config.get(node_name, 'run')
    else:
        services = config.get('services', 'run')
    return [spawn_command(config, server_name)
            for server_name in services.split()]
def end_commands(processes):
    '''
    Terminate each child process in the given list and wait for it to
    exit.
    '''
    for child in processes:
        child.terminate()
        child.join()
def test_commands(base_path=None, timeout=10):
    '''
    Spawn the configured commands, let them run for ``timeout`` seconds,
    then shut them down.  Used to manually verify the configuration
    file.
    '''
    import time
    config = read_config(base_path)
    initialize_logger(config)
    processes = start_commands(config)
    time.sleep(timeout)
    end_commands(processes)
def get_config_modification_handle(path=None):
    '''Returns a Directory change handle on the configuration directory.
    This handle will be used to restart the Django commands child processes
    in case the configuration file has changed in the directory.
    '''
    if not path:
        # Default to the directory containing this module.
        path = dirname(abspath(__file__))
    # Watch the directory (non-recursively) for last-write changes only.
    change_handle = win32file.FindFirstChangeNotification(
        path,
        0,
        win32con.FILE_NOTIFY_CHANGE_LAST_WRITE
    )
    return change_handle
def read_config(base_path=None, filename='service.ini'):
    '''
    Read the configuration file describing the processes to spawn and
    return the parser object.
    '''
    base_path = base_path or dirname(abspath(__file__))
    config = ConfigParser.ConfigParser()
    # Keep option names case-sensitive (the default lower-cases them).
    config.optionxform = str
    config_path = os.path.join(base_path, filename)
    log(config_path)
    config.read(config_path)
    return config
class DjangoService(win32serviceutil.ServiceFramework):
    """NT Service that runs the configured Django background commands.

    Child processes are (re)started from the configuration file; the
    service watches the config directory and restarts the children when
    the file's mtime changes.
    """
    _svc_name_ = "django-service"
    _svc_display_name_ = "Django Background Processes"
    _svc_description_ = "Run the Django background Processes"
    _config_filename = 'service.ini'
    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        log('Initialization')
        # create an event that SvcDoRun can wait on and SvcStop
        # can set.
        # Fix: initialize the child-process list up front; previously
        # stop() raised AttributeError if called before start().
        self.processes = []
        self.config = read_config(self._base_path, self._config_filename)
        initialize_logger(self.config)
        # Make the service directory and its parent importable so the
        # Django project can be found.
        if not self._base_path in sys.path:
            sys.path.append(self._base_path)
        parent_path = dirname(self._base_path)
        if not parent_path in sys.path:
            sys.path.append(parent_path)
        self.stop_event = win32event.CreateEvent(None, 0, 0, None)
    def SvcDoRun(self):
        """Main service loop: run children, restart on config change."""
        self.ReportServiceStatus(win32service.SERVICE_START_PENDING)
        log('starting')
        self.ReportServiceStatus(win32service.SERVICE_RUNNING)
        self.modification_handle = get_config_modification_handle(self._base_path)
        self.configuration_mtime = os.stat(os.path.join(self._base_path, self._config_filename)).st_mtime
        keep_running = True
        do_start = True
        while keep_running:
            # do the actual start
            if do_start:
                self.start()
            log('Started. Waiting for stop')
            index = win32event.WaitForMultipleObjects([self.stop_event, self.modification_handle], False,
                                                      win32event.INFINITE)
            if index == 0:
                # The stop event has been signaled. Stop execution.
                keep_running = False
            else:
                # re-initialise handle
                win32file.FindNextChangeNotification(self.modification_handle)
                new_mtime = os.stat(os.path.join(self._base_path, self._config_filename)).st_mtime
                # Only restart when the config file itself changed, not
                # for other writes in the watched directory.
                if new_mtime != self.configuration_mtime:
                    self.configuration_mtime = new_mtime
                    do_start = True
                    log('Restarting child processes as the configuration has changed')
                    self.stop()
                    self.config = read_config(self._base_path, self._config_filename)
                else:
                    do_start = False
        win32file.FindCloseChangeNotification(self.modification_handle)
    def SvcStop(self):
        """Service-control STOP handler: stop children and signal the loop."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        log('Stopping')
        # Do the actual stop
        self.stop()
        log('Stopped')
        win32event.SetEvent(self.stop_event)
        self.ReportServiceStatus(win32service.SERVICE_STOPPED)
    def start(self):
        """Spawn the configured child processes."""
        self.processes = start_commands(self.config)
    def stop(self):
        """Terminate children and remove any configured leftover files."""
        if self.processes:
            end_commands(self.processes)
            self.processes = []
        node_name = platform.node()
        clean = self.config.get(node_name, 'clean') if self.config.has_section(node_name) else self.config.get(
            'services', 'clean')
        if clean:
            for file in clean.split(';'):
                try:
                    os.remove(file)
                except:
                    error("Error while removing %s\n%s" % (file, traceback.format_exc()))
if __name__ == '__main__':
    # 'test' runs the child commands in the foreground for a few seconds;
    # anything else is handled by the pywin32 service command line
    # (install / start / stop / remove ...).
    if len(sys.argv) > 1 and sys.argv[1] == 'test':
        test_commands()
    else:
        DjangoService._base_path = dirname(abspath(__file__))
        win32serviceutil.HandleCommandLine(DjangoService)
|
ats.py | from ibapi import wrapper
from ibapi.client import EClient
from ibapi.contract import ContractDetails
from assets import *
import orders
from threading import Thread, Event
import logging
import argparse
class BrokerPlatform(wrapper.EWrapper, EClient):
    """Minimal Interactive Brokers client: connects to TWS/Gateway and
    requests contract details for symbols.  Acts as both the EClient
    (outgoing requests) and EWrapper (incoming callbacks).
    """
    def __init__(self, port, client_id):
        wrapper.EWrapper.__init__(self)
        EClient.__init__(self, wrapper=self)
        self.client_id = client_id
        self.port = port
    def error(self, reqId:int, errorCode:int, errorString:str):
        # Delegate to the default EWrapper error logging.
        super().error(reqId, errorCode, errorString)
        pass
    def connect(self):
        # NOTE(review): overrides EClient.connect with a different
        # (zero-argument) signature — confirm this shadowing is intended.
        super().connect("127.0.0.1", self.port, self.client_id)
        # The API message loop must run on its own thread.
        self.thread = Thread(target = self.run)
        self.thread.start()
    def connectAck(self):
        print ("Connected!")
    def nextValidId(self, orderId: int):
        super().nextValidId(orderId)
        orders.next_valid_order_id = orderId
        # Until we get this notification we aren't really ready to run
        # the rest of the system live.
        #
        # Now we are ready and really connected.
    def find_contract(self, symbol):
        """Request contract details for a stock symbol (request id 33)."""
        asset = Stock(symbol)
        self.reqContractDetails(33, asset)
    def contractDetails(self, reqId:int, contractDetails:ContractDetails):
        # Callback: one ContractDetails per matching contract.
        super().contractDetails(reqId, contractDetails)
        pass
    def contractDetailsEnd(self, reqId:int):
        # Callback: no more details for this request id.
        super().contractDetailsEnd(reqId)
        pass
if "__main__" == __name__:
print("Starting up...")
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("-p", "--port", action="store", type=int, help="TCP port to connect to", dest="port", default=7496)
arg_parser.add_argument("-id", "--id", action="store", type=int, help="Client ID", dest="id", default=1026)
args = arg_parser.parse_args()
print("Using Client ID: ", args.id)
print("Connecting to port: ", args.port)
trader = BrokerPlatform(args.port, args.id)
trader.connect()
trader.find_contract("AAPL")
sym = "a"
while (sym != "" and trader.isConnected()):
print ("Enter symbol")
sym = input()
trader.find_contract(sym)
## look up sym
trader.disconnect()
|
test_fullscreen.py | import threading
from .util import destroy_window, run_test
def fullscreen():
import webview
def _fullscreen(webview):
assert webview.webview_ready(10)
destroy_event.set()
t = threading.Thread(target=_fullscreen, args=(webview,))
t.start()
destroy_event = destroy_window(webview)
webview.create_window('Fullscreen test', 'https://www.example.org', fullscreen=True)
def test_fullscreen():
    """Smoke test: create a fullscreen window and tear it down via run_test."""
    run_test(fullscreen)
|
state.py | """
The State Compiler is used to execute states in Salt. A state is unlike
an execution module in that instead of just executing a command, it
ensures that a certain state is present on the system.
The data sent to the state calls is as follows:
{ 'state': '<state module name>',
'fun': '<state function name>',
'name': '<the name argument passed to all states>'
'argn': '<arbitrary argument, can have many of these>'
}
"""
import collections
import copy
import datetime
import fnmatch
import importlib
import logging
import os
import random
import re
import site
import sys
import time
import traceback
import salt.fileclient
import salt.loader
import salt.minion
import salt.pillar
import salt.syspaths as syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.crypt
import salt.utils.data
import salt.utils.decorators.state
import salt.utils.dictupdate
import salt.utils.event
import salt.utils.files
import salt.utils.hashutils
import salt.utils.immutabletypes as immutabletypes
import salt.utils.msgpack
import salt.utils.platform
import salt.utils.process
import salt.utils.url
# Explicit late import to avoid circular import. DO NOT MOVE THIS.
import salt.utils.yamlloader as yamlloader
from salt.exceptions import CommandExecutionError, SaltRenderError, SaltReqTimeoutError
from salt.serializers.msgpack import deserialize as msgpack_deserialize
from salt.serializers.msgpack import serialize as msgpack_serialize
from salt.template import compile_template, compile_template_str
from salt.utils.odict import DefaultOrderedDict, OrderedDict
log = logging.getLogger(__name__)
# These are keywords passed to state module functions which are to be used
# by salt in this state module and not on the actual state module function
STATE_REQUISITE_KEYWORDS = frozenset(
    [
        "onchanges",
        "onchanges_any",
        "onfail",
        "onfail_any",
        "onfail_all",
        "onfail_stop",
        "prereq",
        "prerequired",
        "watch",
        "watch_any",
        "require",
        "require_any",
        "listen",
    ]
)
# The "_in" variants declare the same requisites in the reverse direction
# (from the depended-on state toward its dependents).
STATE_REQUISITE_IN_KEYWORDS = frozenset(
    ["onchanges_in", "onfail_in", "prereq_in", "watch_in", "require_in", "listen_in"]
)
# Keywords consumed by the state runtime itself (ordering, reloading,
# event firing, pub data, ...) and stripped before calling state functions.
STATE_RUNTIME_KEYWORDS = frozenset(
    [
        "fun",
        "state",
        "check_cmd",
        "failhard",
        "onlyif",
        "unless",
        "creates",
        "retry",
        "order",
        "parallel",
        "prereq",
        "prereq_in",
        "prerequired",
        "reload_modules",
        "reload_grains",
        "reload_pillar",
        "runas",
        "runas_password",
        "fire_event",
        "saltenv",
        "use",
        "use_in",
        "__env__",
        "__sls__",
        "__id__",
        "__orchestration_jid__",
        "__pub_user",
        "__pub_arg",
        "__pub_jid",
        "__pub_fun",
        "__pub_tgt",
        "__pub_ret",
        "__pub_pid",
        "__pub_tgt_type",
        "__prereq__",
        "__prerequired__",
    ]
)
# Union of everything salt interprets internally; nothing in this set is
# ever passed through to a state module function.
STATE_INTERNAL_KEYWORDS = STATE_REQUISITE_KEYWORDS.union(
    STATE_REQUISITE_IN_KEYWORDS
).union(STATE_RUNTIME_KEYWORDS)
def _odict_hashable(self):
    # Identity-based hash so OrderedDict instances can be used as dict
    # keys / set members. Note: two equal OrderedDicts will NOT hash equal.
    return id(self)


# Monkey-patch applied module-wide: every OrderedDict becomes hashable.
OrderedDict.__hash__ = _odict_hashable
def split_low_tag(tag):
    """Invert a running tag back into the low dict it was generated from.

    The tag is four fields joined by ``_|-``: state, id, name, fun.
    """
    state_mod, ident, name, fun = tag.split("_|-")
    return {
        "state": state_mod,
        "__id__": ident,
        "name": name,
        "fun": fun,
    }
def _gen_tag(low):
"""
Generate the running dict tag string from the low data structure
"""
return "{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}".format(low)
def _clean_tag(tag):
    """
    Make tag name safe for filenames
    """
    # Delegates escaping of path-hostile characters to the salt utility.
    return salt.utils.files.safe_filename_leaf(tag)
def _l_tag(name, id_):
    """Generate the tag used to report a listen-requisite error."""
    return _gen_tag(
        {
            "name": "listen_{}".format(name),
            "__id__": "listen_{}".format(id_),
            "state": "Listen_Error",
            "fun": "Listen_Error",
        }
    )
def _calculate_fake_duration():
"""
Generate a NULL duration for when states do not run
but we want the results to be consistent.
"""
utc_start_time = datetime.datetime.utcnow()
local_start_time = utc_start_time - (
datetime.datetime.utcnow() - datetime.datetime.now()
)
utc_finish_time = datetime.datetime.utcnow()
start_time = local_start_time.time().isoformat()
delta = utc_finish_time - utc_start_time
# duration in milliseconds.microseconds
duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
return start_time, duration
def get_accumulator_dir(cachedir):
    """
    Return the directory that accumulator data is stored in, creating it if it
    doesn't exist.
    """
    fn_ = os.path.join(cachedir, "accumulator")
    # exist_ok avoids the check-then-create race of the original
    # isdir()/makedirs() pair (another process could create the directory
    # between the two calls and trigger FileExistsError).
    os.makedirs(fn_, exist_ok=True)
    return fn_
def trim_req(req):
    """Strip any ``mod.fun`` function suffix from a requisite's key."""
    key = next(iter(req))
    if "." not in key:
        return req
    module_only = key.partition(".")[0]
    return {module_only: req[key]}
def state_args(id_, state, high):
    """Collect the argument names passed to ``high[id_][state]``.

    Returns an empty set when the id or the state is absent.
    """
    if id_ not in high or state not in high[id_]:
        return set()
    return {
        next(iter(item))
        for item in high[id_][state]
        if isinstance(item, dict) and len(item) == 1
    }
def find_name(name, state, high):
    """Scan high data for the id referencing the given name.

    Returns a list of ``(id, state)`` tuples that match.  If ``state`` is
    ``sls``, every id declared by that SLS is returned instead.
    """
    matches = []
    if name in high and state in high[name]:
        # Direct hit: the name is itself a declaration id.
        matches.append((name, state))
    elif state == "sls":
        # Requiring an entire SLS: every id declared in it matches.
        for nid, item in high.items():
            if item["__sls__"] == name:
                matches.append((nid, next(iter(item))))
    else:
        # Fall back to scanning every declaration's `name` argument.
        for nid in high:
            if state not in high[nid]:
                continue
            decls = high[nid][state]
            if not isinstance(decls, list):
                continue
            for arg in decls:
                if not isinstance(arg, dict) or len(arg) != 1:
                    continue
                if arg[next(iter(arg))] == name:
                    matches.append((nid, state))
    return matches
def find_sls_ids(sls, high):
    """Scan for all ids declared by the given sls.

    Returns a list of ``(id, state)`` tuples, one per state declaration.
    (The previous docstring incorrectly described a dict return.)
    """
    found = []
    for nid, item in high.items():
        try:
            sls_tgt = item["__sls__"]
        except TypeError:
            # Only __exclude__ is allowed to carry a non-dict value.
            if nid != "__exclude__":
                log.error(
                    "Invalid non-dict item '%s' in high data. Value: %r", nid, item
                )
            continue
        if sls_tgt != sls:
            continue
        for st_ in item:
            if not st_.startswith("__"):
                found.append((nid, st_))
    return found
def format_log(ret):
    """
    Format the state into a log message

    Logs at INFO for successful/None results, ERROR otherwise; anything
    that does not look like a state return dict is logged verbatim.
    """
    msg = ""
    if isinstance(ret, dict):
        # Looks like the ret may be a valid state return
        if "changes" in ret:
            # Yep, looks like a valid state return
            chg = ret["changes"]
            if not chg:
                # No changes: prefer the state's own comment.
                if ret["comment"]:
                    msg = ret["comment"]
                else:
                    msg = "No changes made for {0[name]}".format(ret)
            elif isinstance(chg, dict):
                if "diff" in chg:
                    if isinstance(chg["diff"], str):
                        msg = "File changed:\n{}".format(chg["diff"])
                # Package-style changes: every value is an {old, new} dict.
                if all([isinstance(x, dict) for x in chg.values()]):
                    if all([("old" in x and "new" in x) for x in chg.values()]):
                        msg = "Made the following changes:\n"
                        for pkg in chg:
                            old = chg[pkg]["old"]
                            if not old and old not in (False, None):
                                old = "absent"
                            new = chg[pkg]["new"]
                            if not new and new not in (False, None):
                                new = "absent"
                            # This must be able to handle unicode as some package names contain
                            # non-ascii characters like "Français" or "Español". See Issue #33605.
                            msg += "'{}' changed from '{}' to '{}'\n".format(
                                pkg, old, new
                            )
            if not msg:
                # Fall back to the raw changes repr.
                msg = str(ret["changes"])
            if ret["result"] is True or ret["result"] is None:
                log.info(msg)
            else:
                log.error(msg)
    else:
        # catch unhandled data
        log.info(str(ret))
def master_compile(master_opts, minion_opts, grains, id_, saltenv):
    """
    Compile the master side low state data, and build the hidden state file
    """
    # NOTE(review): MasterHighState is not defined in this chunk; presumably
    # imported/defined elsewhere in the module — confirm.
    st_ = MasterHighState(master_opts, minion_opts, grains, id_, saltenv)
    return st_.compile_highstate()
def ishashable(obj):
    """Return True if *obj* can be hashed (i.e. usable as a dict key)."""
    try:
        hash(obj)
        return True
    except TypeError:
        return False
def mock_ret(cdata):
    """Build a successful mock state return without calling the state.

    The name comes from the first positional arg when present, otherwise
    from the ``name`` keyword argument.
    """
    # As this is expanded it should be sent into the execution module
    # layer or it should be turned into a standalone loader system
    args = cdata["args"]
    name = args[0] if args else cdata["kwargs"]["name"]
    return {
        "name": name,
        "comment": "Not called, mocked",
        "changes": {},
        "result": True,
    }
class StateError(Exception):
    """
    Custom exception class.

    Carries no extra behavior; exists to give callers a distinct state
    error type to raise and catch.
    """
class Compiler:
    """
    Class used to compile and manage the High Data structure

    Renders templates into high data, pads short-form declarations,
    verifies the structure, compiles high data into ordered low chunks,
    and applies __exclude__ directives.
    """

    def __init__(self, opts, renderers):
        # opts: configuration dict; renderers: loaded renderer functions
        # consumed by compile_template.
        self.opts = opts
        self.rend = renderers

    def render_template(self, template, **kwargs):
        """
        Enforce the states in a template
        """
        high = compile_template(
            template,
            self.rend,
            self.opts["renderer"],
            self.opts["renderer_blacklist"],
            self.opts["renderer_whitelist"],
            **kwargs
        )
        if not high:
            return high
        return self.pad_funcs(high)

    def pad_funcs(self, high):
        """
        Turns dot delimited function refs into function strings
        """
        for name in high:
            if not isinstance(high[name], dict):
                if isinstance(high[name], str):
                    # Is this is a short state? It needs to be padded!
                    if "." in high[name]:
                        comps = high[name].split(".")
                        if len(comps) >= 2:
                            # Merge the comps
                            comps[1] = ".".join(comps[1 : len(comps)])
                        high[name] = {
                            # '__sls__': template,
                            # '__env__': None,
                            comps[0]: [comps[1]]
                        }
                        continue
                    continue
            skeys = set()
            for key in sorted(high[name]):
                if key.startswith("_"):
                    continue
                if not isinstance(high[name][key], list):
                    continue
                if "." in key:
                    comps = key.split(".")
                    if len(comps) >= 2:
                        # Merge the comps
                        comps[1] = ".".join(comps[1 : len(comps)])
                    # Salt doesn't support state files such as:
                    #
                    # /etc/redis/redis.conf:
                    #   file.managed:
                    #     - user: redis
                    #     - group: redis
                    #     - mode: 644
                    #   file.comment:
                    #     - regex: ^requirepass
                    if comps[0] in skeys:
                        continue
                    high[name][comps[0]] = high[name].pop(key)
                    high[name][comps[0]].append(comps[1])
                    skeys.add(comps[0])
                    continue
                skeys.add(key)
        return high

    def verify_high(self, high):
        """
        Verify that the high data is viable and follows the data structure

        Returns a list of error strings (empty when the data is valid).
        """
        errors = []
        if not isinstance(high, dict):
            errors.append("High data is not a dictionary and is invalid")
        reqs = OrderedDict()
        for name, body in high.items():
            if name.startswith("__"):
                continue
            if not isinstance(name, str):
                errors.append(
                    "ID '{}' in SLS '{}' is not formed as a string, but is a {}".format(
                        name, body["__sls__"], type(name).__name__
                    )
                )
            if not isinstance(body, dict):
                err = "The type {} in {} is not formatted as a dictionary".format(
                    name, body
                )
                errors.append(err)
                continue
            for state in body:
                if state.startswith("__"):
                    continue
                if not isinstance(body[state], list):
                    errors.append(
                        "State '{}' in SLS '{}' is not formed as a list".format(
                            name, body["__sls__"]
                        )
                    )
                else:
                    # `fun` counts function declarations found for this state:
                    # exactly one is required.
                    fun = 0
                    if "." in state:
                        fun += 1
                    for arg in body[state]:
                        if isinstance(arg, str):
                            fun += 1
                            if " " in arg.strip():
                                errors.append(
                                    'The function "{}" in state '
                                    '"{}" in SLS "{}" has '
                                    "whitespace, a function with whitespace is "
                                    "not supported, perhaps this is an argument "
                                    'that is missing a ":"'.format(
                                        arg, name, body["__sls__"]
                                    )
                                )
                        elif isinstance(arg, dict):
                            # The arg is a dict, if the arg is require or
                            # watch, it must be a list.
                            #
                            # Add the requires to the reqs dict and check them
                            # all for recursive requisites.
                            argfirst = next(iter(arg))
                            if argfirst in ("require", "watch", "prereq", "onchanges"):
                                if not isinstance(arg[argfirst], list):
                                    errors.append(
                                        "The {} statement in state '{}' in SLS '{}' "
                                        "needs to be formed as a list".format(
                                            argfirst, name, body["__sls__"]
                                        )
                                    )
                                # It is a list, verify that the members of the
                                # list are all single key dicts.
                                else:
                                    reqs[name] = {"state": state}
                                    for req in arg[argfirst]:
                                        if isinstance(req, str):
                                            req = {"id": req}
                                        if not isinstance(req, dict):
                                            errors.append(
                                                "Requisite declaration {} in SLS {} "
                                                "is not formed as a single key "
                                                "dictionary".format(
                                                    req, body["__sls__"]
                                                )
                                            )
                                            continue
                                        req_key = next(iter(req))
                                        req_val = req[req_key]
                                        if "." in req_key:
                                            errors.append(
                                                "Invalid requisite type '{}' "
                                                "in state '{}', in SLS "
                                                "'{}'. Requisite types must "
                                                "not contain dots, did you "
                                                "mean '{}'?".format(
                                                    req_key,
                                                    name,
                                                    body["__sls__"],
                                                    req_key[: req_key.find(".")],
                                                )
                                            )
                                        if not ishashable(req_val):
                                            errors.append(
                                                'Illegal requisite "{}", is SLS {}\n'.format(
                                                    str(req_val),
                                                    body["__sls__"],
                                                )
                                            )
                                            continue
                                        # Check for global recursive requisites
                                        reqs[name][req_val] = req_key
                                        # I am going beyond 80 chars on
                                        # purpose, this is just too much
                                        # of a pain to deal with otherwise
                                        if req_val in reqs:
                                            if name in reqs[req_val]:
                                                if reqs[req_val][name] == state:
                                                    if (
                                                        reqs[req_val]["state"]
                                                        == reqs[name][req_val]
                                                    ):
                                                        errors.append(
                                                            "A recursive requisite was"
                                                            ' found, SLS "{}" ID "{}"'
                                                            ' ID "{}"'.format(
                                                                body["__sls__"],
                                                                name,
                                                                req_val,
                                                            )
                                                        )
                            # Make sure that there is only one key in the
                            # dict
                            if len(list(arg)) != 1:
                                errors.append(
                                    "Multiple dictionaries defined in argument "
                                    "of state '{}' in SLS '{}'".format(
                                        name, body["__sls__"]
                                    )
                                )
                    if not fun:
                        if state == "require" or state == "watch":
                            continue
                        errors.append(
                            "No function declared in state '{}' in SLS '{}'".format(
                                state, body["__sls__"]
                            )
                        )
                    elif fun > 1:
                        errors.append(
                            "Too many functions declared in state '{}' in "
                            "SLS '{}'".format(state, body["__sls__"])
                        )
        return errors

    def order_chunks(self, chunks):
        """
        Sort the chunk list verifying that the chunks follow the order
        specified in the order options.
        """
        # First pass: find the highest explicit integer order so implicit
        # chunks sort after every explicit one.
        cap = 1
        for chunk in chunks:
            if "order" in chunk:
                if not isinstance(chunk["order"], int):
                    continue
                chunk_order = chunk["order"]
                if chunk_order > cap - 1 and chunk_order > 0:
                    cap = chunk_order + 100
        # Second pass: normalize "first"/"last"/missing/negative orders.
        for chunk in chunks:
            if "order" not in chunk:
                chunk["order"] = cap
                continue
            if not isinstance(chunk["order"], (int, float)):
                if chunk["order"] == "last":
                    chunk["order"] = cap + 1000000
                elif chunk["order"] == "first":
                    chunk["order"] = 0
                else:
                    chunk["order"] = cap
            if "name_order" in chunk:
                chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
            if chunk["order"] < 0:
                chunk["order"] = cap + 1000000 + chunk["order"]
            chunk["name"] = salt.utils.data.decode(chunk["name"])
        # Ties broken deterministically by state/name/fun.
        chunks.sort(
            key=lambda chunk: (
                chunk["order"],
                "{0[state]}{0[name]}{0[fun]}".format(chunk),
            )
        )
        return chunks

    def compile_high_data(self, high):
        """
        "Compile" the high data as it is retrieved from the CLI or YAML into
        the individual state executor structures
        """
        chunks = []
        for name, body in high.items():
            if name.startswith("__"):
                continue
            for state, run in body.items():
                funcs = set()
                names = []
                if state.startswith("__"):
                    continue
                chunk = {"state": state, "name": name}
                if "__sls__" in body:
                    chunk["__sls__"] = body["__sls__"]
                if "__env__" in body:
                    chunk["__env__"] = body["__env__"]
                chunk["__id__"] = name
                for arg in run:
                    if isinstance(arg, str):
                        funcs.add(arg)
                        continue
                    if isinstance(arg, dict):
                        for key, val in arg.items():
                            if key == "names":
                                for _name in val:
                                    if _name not in names:
                                        names.append(_name)
                                continue
                            else:
                                chunk.update(arg)
                if names:
                    # Expand a `names` declaration into one chunk per name.
                    name_order = 1
                    for entry in names:
                        live = copy.deepcopy(chunk)
                        if isinstance(entry, dict):
                            low_name = next(iter(entry.keys()))
                            live["name"] = low_name
                            list(map(live.update, entry[low_name]))
                        else:
                            live["name"] = entry
                        live["name_order"] = name_order
                        name_order = name_order + 1
                        for fun in funcs:
                            live["fun"] = fun
                            chunks.append(live)
                else:
                    live = copy.deepcopy(chunk)
                    for fun in funcs:
                        live["fun"] = fun
                        chunks.append(live)
        chunks = self.order_chunks(chunks)
        return chunks

    def apply_exclude(self, high):
        """
        Read in the __exclude__ list and remove all excluded objects from the
        high data
        """
        if "__exclude__" not in high:
            return high
        ex_sls = set()
        ex_id = set()
        exclude = high.pop("__exclude__")
        for exc in exclude:
            if isinstance(exc, str):
                # The exclude statement is a string, assume it is an sls
                ex_sls.add(exc)
            if isinstance(exc, dict):
                # Explicitly declared exclude
                if len(exc) != 1:
                    continue
                key = next(iter(exc.keys()))
                if key == "sls":
                    ex_sls.add(exc["sls"])
                elif key == "id":
                    ex_id.add(exc["id"])
        # Now the excludes have been simplified, use them
        if ex_sls:
            # There are sls excludes, find the associated ids
            for name, body in high.items():
                if name.startswith("__"):
                    continue
                if body.get("__sls__", "") in ex_sls:
                    ex_id.add(name)
        for id_ in ex_id:
            if id_ in high:
                high.pop(id_)
        return high
class State:
"""
Class used to execute salt states
"""
    def __init__(
        self,
        opts,
        pillar_override=None,
        jid=None,
        pillar_enc=None,
        proxy=None,
        context=None,
        mocked=False,
        loader="states",
        initial_pillar=None,
    ):
        """Set up a state run: resolve grains and pillar, load modules.

        Args:
            opts: Minion configuration dict (mutated: grains/pillar set).
            pillar_override: Extra pillar data merged over compiled pillar.
            jid: Job id of this run.
            pillar_enc: Name of the scheme the override is encrypted with.
            proxy: Proxy-minion object, if any.
            context: Shared state context dict.
            mocked: When True, state calls are mocked (see mock_ret).
            loader: Which states subsystem to load ("states" or "thorium").
            initial_pillar: Pre-compiled pillar to use instead of compiling.
        """
        self.states_loader = loader
        if "grains" not in opts:
            opts["grains"] = salt.loader.grains(opts)
        self.opts = opts
        self.proxy = proxy
        self._pillar_override = pillar_override
        if pillar_enc is not None:
            # Normalize the encryption scheme name to lowercase.
            try:
                pillar_enc = pillar_enc.lower()
            except AttributeError:
                pillar_enc = str(pillar_enc).lower()
        self._pillar_enc = pillar_enc
        log.debug("Gathering pillar data for state run")
        if initial_pillar and not self._pillar_override:
            self.opts["pillar"] = initial_pillar
        else:
            # Compile pillar data
            self.opts["pillar"] = self._gather_pillar()
            # Reapply overrides on top of compiled pillar
            if self._pillar_override:
                self.opts["pillar"] = salt.utils.dictupdate.merge(
                    self.opts["pillar"],
                    self._pillar_override,
                    self.opts.get("pillar_source_merging_strategy", "smart"),
                    self.opts.get("renderer", "yaml"),
                    self.opts.get("pillar_merge_lists", False),
                )
        log.debug("Finished gathering pillar data for state run")
        self.state_con = context or {}
        self.load_modules()
        self.active = set()        # tags currently being executed (recursion guard)
        self.mod_init = set()      # state modules whose mod_init already ran
        self.pre = {}              # results of prereq test-runs
        self.__run_num = 0
        self.jid = jid
        self.instance_id = str(id(self))
        self.inject_globals = {}
        self.mocked = mocked
    def _gather_pillar(self):
        """
        Whenever a state run starts, gather the pillar data fresh

        Decrypts and normalizes any pillar override first, then compiles
        pillar via salt.pillar.get_pillar().
        """
        if self._pillar_override:
            if self._pillar_enc:
                try:
                    self._pillar_override = salt.utils.crypt.decrypt(
                        self._pillar_override,
                        self._pillar_enc,
                        translate_newlines=True,
                        renderers=getattr(self, "rend", None),
                        opts=self.opts,
                        valid_rend=self.opts["decrypt_pillar_renderers"],
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    # Best-effort: log and fall through with the raw override.
                    log.error("Failed to decrypt pillar override: %s", exc)
            if isinstance(self._pillar_override, str):
                # This can happen if an entire pillar dictionary was passed as
                # a single encrypted string. The override will have been
                # decrypted above, and should now be a stringified dictionary.
                # Use the YAML loader to convert that to a Python dictionary.
                try:
                    self._pillar_override = yamlloader.load(
                        self._pillar_override, Loader=yamlloader.SaltYamlSafeLoader
                    )
                except Exception as exc:  # pylint: disable=broad-except
                    log.error("Failed to load CLI pillar override")
                    log.exception(exc)
            if not isinstance(self._pillar_override, dict):
                # Anything other than a dict cannot be merged; drop it.
                log.error("Pillar override was not passed as a dictionary")
                self._pillar_override = None
        pillar = salt.pillar.get_pillar(
            self.opts,
            self.opts["grains"],
            self.opts["id"],
            self.opts["saltenv"],
            pillar_override=self._pillar_override,
            pillarenv=self.opts.get("pillarenv"),
        )
        return pillar.compile_pillar()
    def _mod_init(self, low):
        """
        Check the module initialization function, if this is the first run
        of a state package that has a mod_init function, then execute the
        mod_init function in the state module.
        """
        # ensure that the module is loaded
        try:
            self.states[
                "{}.{}".format(low["state"], low["fun"])
            ]  # pylint: disable=W0106
        except KeyError:
            # Unknown state function: nothing to initialize.
            return
        minit = "{}.mod_init".format(low["state"])
        if low["state"] not in self.mod_init:
            if minit in self.states._dict:
                mret = self.states[minit](low)
                if not mret:
                    # mod_init signalled failure; do not mark as initialized
                    # so it will be retried on the next chunk of this module.
                    return
                self.mod_init.add(low["state"])
def _aggregate_requisites(self, low, chunks):
"""
Aggregate the requisites
"""
requisites = {}
low_state = low["state"]
for chunk in chunks:
# if the state function in the chunk matches
# the state function in the low we're looking at
# and __agg__ is True, add the requisites from the
# chunk to those in the low.
if chunk["state"] == low["state"] and chunk.get("__agg__"):
for req in frozenset.union(
*[STATE_REQUISITE_KEYWORDS, STATE_REQUISITE_IN_KEYWORDS]
):
if req in chunk:
if req in requisites:
requisites[req].extend(chunk[req])
else:
requisites[req] = chunk[req]
low.update(requisites)
return low
    def _mod_aggregate(self, low, running, chunks):
        """
        Execute the aggregation systems to runtime modify the low chunk

        Calls the state module's mod_aggregate (when configured via the
        state_aggregate option or the chunk's own `aggregate` key) and
        merges requisites from the aggregated chunks.
        """
        agg_opt = self.functions["config.option"]("state_aggregate")
        if "aggregate" in low:
            # Per-chunk setting overrides the global option.
            agg_opt = low["aggregate"]
        if agg_opt is True:
            agg_opt = [low["state"]]
        elif not isinstance(agg_opt, list):
            return low
        if low["state"] in agg_opt and not low.get("__agg__"):
            agg_fun = "{}.mod_aggregate".format(low["state"])
            if agg_fun in self.states:
                try:
                    low = self.states[agg_fun](low, chunks, running)
                    low = self._aggregate_requisites(low, chunks)
                    low["__agg__"] = True
                except TypeError:
                    log.error("Failed to execute aggregate for state %s", low["state"])
        return low
def _run_check(self, low_data):
"""
Check that unless doesn't return 0, and that onlyif returns a 0.
"""
ret = {"result": False, "comment": []}
cmd_opts = {}
# Set arguments from cmd.run state as appropriate
POSSIBLE_CMD_ARGS = (
"cwd",
"root",
"runas",
"env",
"prepend_path",
"umask",
"timeout",
"success_retcodes",
)
for run_cmd_arg in POSSIBLE_CMD_ARGS:
cmd_opts[run_cmd_arg] = low_data.get(run_cmd_arg)
if "shell" in low_data:
shell = low_data["shell"]
elif "shell" in self.opts["grains"]:
shell = self.opts["grains"].get("shell")
else:
shell = None
# /sbin/nologin always causes the onlyif / unless cmd to fail
if shell is not None and shell != "/sbin/nologin":
cmd_opts["shell"] = shell
if "onlyif" in low_data:
_ret = self._run_check_onlyif(low_data, cmd_opts)
ret["result"] = _ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
ret["skip_watch"] = _ret["skip_watch"]
if "unless" in low_data:
_ret = self._run_check_unless(low_data, cmd_opts)
# If either result is True, the returned result should be True
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
if "creates" in low_data:
_ret = self._run_check_creates(low_data)
ret["result"] = _ret["result"] or ret["result"]
ret["comment"].append(_ret["comment"])
if "skip_watch" in _ret:
# If either result is True, the returned result should be True
ret["skip_watch"] = _ret["skip_watch"] or ret["skip_watch"]
return ret
    def _run_check_function(self, entry):
        """Format slot args and run unless/onlyif function.

        NOTE: mutates *entry* — pops "fun" and "args"; the remainder is
        passed through as keyword arguments.
        """
        fun = entry.pop("fun")
        args = entry.pop("args") if "args" in entry else []
        cdata = {"args": args, "kwargs": entry}
        # Resolve any __slot__ references before invoking the function.
        self.format_slots(cdata)
        return self.functions[fun](*cdata["args"], **cdata["kwargs"])
    def _run_check_onlyif(self, low_data, cmd_opts):
        """
        Make sure that all commands return True for the state to run. If any
        command returns False (non 0), the state will not run

        Returns a dict with ``result``/``comment`` and, when the state is to
        be skipped, ``skip_watch: True``.
        """
        ret = {"result": False}

        if not isinstance(low_data["onlyif"], list):
            low_data_onlyif = [low_data["onlyif"]]
        else:
            low_data_onlyif = low_data["onlyif"]

        # If any are False the state will NOT run
        def _check_cmd(cmd):
            # Don't run condition (False)
            if cmd != 0 and ret["result"] is False:
                ret.update(
                    {
                        "comment": "onlyif condition is false",
                        "skip_watch": True,
                        "result": True,
                    }
                )
                return False
            elif cmd == 0:
                ret.update({"comment": "onlyif condition is true", "result": False})
                return True

        for entry in low_data_onlyif:
            if isinstance(entry, str):
                # Shell-command form: retcode 0 means "run the state".
                try:
                    cmd = self.functions["cmd.retcode"](
                        entry, ignore_retcode=True, python_shell=True, **cmd_opts
                    )
                except CommandExecutionError:
                    # Command failed, notify onlyif to skip running the item
                    cmd = 100
                log.debug("Last command return code: %s", cmd)
                if not _check_cmd(cmd):
                    return ret
            elif isinstance(entry, dict):
                # Function-call form: {"fun": ..., "args": [...], ...}.
                if "fun" not in entry:
                    ret["comment"] = "no `fun` argument in onlyif: {}".format(entry)
                    log.warning(ret["comment"])
                    return ret

                get_return = entry.pop("get_return", None)
                result = self._run_check_function(entry)
                if get_return:
                    result = salt.utils.data.traverse_dict_and_list(result, get_return)
                if self.state_con.get("retcode", 0):
                    # A retcode recorded in the state context wins over the
                    # returned value.
                    if not _check_cmd(self.state_con["retcode"]):
                        return ret
                elif not result:
                    ret.update(
                        {
                            "comment": "onlyif condition is false",
                            "skip_watch": True,
                            "result": True,
                        }
                    )
                    return ret
                else:
                    ret.update({"comment": "onlyif condition is true", "result": False})

            else:
                ret.update(
                    {
                        "comment": "onlyif execution failed, bad type passed",
                        "result": False,
                    }
                )
                return ret
        return ret
    def _run_check_unless(self, low_data, cmd_opts):
        """
        Check if any of the commands return False (non 0). If any are False the
        state will run.

        Returns a dict with ``result``/``comment`` and, when the state is to
        be skipped, ``skip_watch: True``.
        """
        ret = {"result": False}

        if not isinstance(low_data["unless"], list):
            low_data_unless = [low_data["unless"]]
        else:
            low_data_unless = low_data["unless"]

        # If any are False the state will run
        def _check_cmd(cmd):
            # Don't run condition (True)
            if cmd == 0:
                ret.update(
                    {
                        "comment": "unless condition is true",
                        "skip_watch": True,
                        "result": True,
                    }
                )
                return False
            else:
                ret.pop("skip_watch", None)
                ret.update({"comment": "unless condition is false", "result": False})
                return True

        for entry in low_data_unless:
            if isinstance(entry, str):
                # Shell-command form: retcode 0 means "skip the state".
                try:
                    cmd = self.functions["cmd.retcode"](
                        entry, ignore_retcode=True, python_shell=True, **cmd_opts
                    )
                    log.debug("Last command return code: %s", cmd)
                except CommandExecutionError:
                    # Command failed, so notify unless to skip the item
                    cmd = 0
                if _check_cmd(cmd):
                    return ret
            elif isinstance(entry, dict):
                # Function-call form: {"fun": ..., "args": [...], ...}.
                if "fun" not in entry:
                    ret["comment"] = "no `fun` argument in unless: {}".format(entry)
                    log.warning(ret["comment"])
                    return ret

                get_return = entry.pop("get_return", None)
                result = self._run_check_function(entry)
                if get_return:
                    result = salt.utils.data.traverse_dict_and_list(result, get_return)
                if self.state_con.get("retcode", 0):
                    # A retcode recorded in the state context wins over the
                    # returned value.
                    if _check_cmd(self.state_con["retcode"]):
                        return ret
                elif result:
                    ret.update(
                        {
                            "comment": "unless condition is true",
                            "skip_watch": True,
                            "result": True,
                        }
                    )
                else:
                    ret.update(
                        {"comment": "unless condition is false", "result": False}
                    )
                    return ret
            else:
                ret.update(
                    {
                        "comment": "unless condition is false, bad type passed",
                        "result": False,
                    }
                )

        # No reason to stop, return ret
        return ret
    def _run_check_cmd(self, low_data):
        """
        Alter the way a successful state run is determined

        Runs each check_cmd command; the first failing command short-circuits
        with a failure result.
        """
        ret = {"result": False}
        cmd_opts = {}
        if "shell" in self.opts["grains"]:
            cmd_opts["shell"] = self.opts["grains"].get("shell")
        for entry in low_data["check_cmd"]:
            cmd = self.functions["cmd.retcode"](
                entry, ignore_retcode=True, python_shell=True, **cmd_opts
            )
            log.debug("Last command return code: %s", cmd)
            if cmd == 0 and ret["result"] is False:
                ret.update(
                    {
                        "comment": "check_cmd determined the state succeeded",
                        "result": True,
                    }
                )
            elif cmd != 0:
                # Fail fast on the first failing check command.
                ret.update(
                    {
                        "comment": "check_cmd determined the state failed",
                        "result": False,
                    }
                )
                return ret
        return ret
def _run_check_creates(self, low_data):
"""
Check that listed files exist
"""
ret = {"result": False}
if isinstance(low_data["creates"], str) and os.path.exists(low_data["creates"]):
ret["comment"] = "{} exists".format(low_data["creates"])
ret["result"] = True
ret["skip_watch"] = True
elif isinstance(low_data["creates"], list) and all(
[os.path.exists(path) for path in low_data["creates"]]
):
ret["comment"] = "All files in creates exist"
ret["result"] = True
ret["skip_watch"] = True
else:
ret["comment"] = "Creates files not found"
ret["result"] = False
return ret
    def reset_run_num(self):
        """
        Reset the run_num value to 0
        """
        # Run numbers order results; a fresh run starts counting again.
        self.__run_num = 0
    def _load_states(self):
        """
        Read the state loader value and loadup the correct states subsystem

        Uses the thorium loader when states_loader == "thorium", otherwise
        the regular states loader.
        """
        if self.states_loader == "thorium":
            self.states = salt.loader.thorium(
                self.opts, self.functions, {}
            )  # TODO: Add runners, proxy?
        else:
            self.states = salt.loader.states(
                self.opts,
                self.functions,
                self.utils,
                self.serializers,
                context=self.state_con,
                proxy=self.proxy,
            )
    def load_modules(self, data=None, proxy=None):
        """
        Load the modules into the state

        NOTE(review): the *proxy* parameter is never read here; the body
        uses self.proxy instead — confirm whether callers rely on passing it.
        """
        log.info("Loading fresh modules for state activity")
        self.utils = salt.loader.utils(self.opts)
        self.functions = salt.loader.minion_mods(
            self.opts, self.state_con, utils=self.utils, proxy=self.proxy
        )
        if isinstance(data, dict):
            if data.get("provider", False):
                # Remap module functions to an alternate provider module.
                if isinstance(data["provider"], str):
                    providers = [{data["state"]: data["provider"]}]
                elif isinstance(data["provider"], list):
                    providers = data["provider"]
                else:
                    providers = {}
                for provider in providers:
                    for mod in provider:
                        funcs = salt.loader.raw_mod(
                            self.opts, provider[mod], self.functions
                        )
                        if funcs:
                            for func in funcs:
                                # e.g. expose providermod.install as mod.install
                                f_key = "{}{}".format(mod, func[func.rindex(".") :])
                                self.functions[f_key] = funcs[func]
        self.serializers = salt.loader.serializers(self.opts)
        self._load_states()
        self.rend = salt.loader.render(
            self.opts,
            self.functions,
            states=self.states,
            proxy=self.proxy,
            context=self.state_con,
        )
def module_refresh(self):
"""
Refresh all the modules
"""
log.debug("Refreshing modules...")
if self.opts["grains"].get("os") != "MacOS":
# In case a package has been installed into the current python
# process 'site-packages', the 'site' module needs to be reloaded in
# order for the newly installed package to be importable.
try:
importlib.reload(site)
except RuntimeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
except TypeError:
log.error(
"Error encountered during module reload. Modules were not reloaded."
)
self.load_modules()
if not self.opts.get("local", False) and self.opts.get("multiprocessing", True):
self.functions["saltutil.refresh_modules"]()
    def check_refresh(self, data, ret):
        """
        Check to see if the modules for this state instance need to be updated,
        only update if the state is a file or a package and if it changed
        something. If the file function is managed check to see if the file is a
        possible module type, e.g. a python, pyx, or .so. Always refresh if the
        function is recurse, since that can lay down anything.
        """
        _reload_modules = False
        if data.get("reload_grains", False):
            log.debug("Refreshing grains...")
            self.opts["grains"] = salt.loader.grains(self.opts)
            _reload_modules = True

        if data.get("reload_pillar", False):
            log.debug("Refreshing pillar...")
            self.opts["pillar"] = self._gather_pillar()
            _reload_modules = True

        if not ret["changes"]:
            # Nothing changed: only refresh when explicitly forced.
            if data.get("force_reload_modules", False):
                self.module_refresh()
            return

        if data.get("reload_modules", False) or _reload_modules:
            # User explicitly requests a reload
            self.module_refresh()
            return

        if data["state"] == "file":
            if data["fun"] == "managed":
                # A managed file that looks like an importable module.
                if data["name"].endswith((".py", ".pyx", ".pyo", ".pyc", ".so")):
                    self.module_refresh()
            elif data["fun"] == "recurse":
                self.module_refresh()
            elif data["fun"] == "symlink":
                if "bin" in data["name"]:
                    self.module_refresh()
        elif data["state"] in ("pkg", "ports", "pip"):
            self.module_refresh()
    def verify_data(self, data):
        """
        Verify the data, return an error statement if something is wrong

        Returns a list of error strings (empty when the low data is valid).
        NOTE: may mutate *data* — a `watch` requisite is folded into
        `require` when the state module has no mod_watch function.
        """
        errors = []
        if "state" not in data:
            errors.append('Missing "state" data')
        if "fun" not in data:
            errors.append('Missing "fun" data')
        if "name" not in data:
            errors.append('Missing "name" data')
        if data["name"] and not isinstance(data["name"], str):
            errors.append(
                "ID '{}' {}is not formed as a string, but is a {}".format(
                    data["name"],
                    "in SLS '{}' ".format(data["__sls__"]) if "__sls__" in data else "",
                    type(data["name"]).__name__,
                )
            )
        if errors:
            # Structural errors make the later checks meaningless.
            return errors
        full = data["state"] + "." + data["fun"]
        if full not in self.states:
            if "__sls__" in data:
                errors.append(
                    "State '{}' was not found in SLS '{}'".format(full, data["__sls__"])
                )
                reason = self.states.missing_fun_string(full)
                if reason:
                    errors.append("Reason: {}".format(reason))
            else:
                errors.append("Specified state '{}' was not found".format(full))
        else:
            # First verify that the parameters are met
            aspec = salt.utils.args.get_function_argspec(self.states[full])
            arglen = 0
            deflen = 0
            if isinstance(aspec.args, list):
                arglen = len(aspec.args)
            if isinstance(aspec.defaults, tuple):
                deflen = len(aspec.defaults)
            # Arguments without defaults are mandatory.
            for ind in range(arglen - deflen):
                if aspec.args[ind] not in data:
                    errors.append(
                        "Missing parameter {} for state {}".format(
                            aspec.args[ind], full
                        )
                    )
        # If this chunk has a recursive require, then it will cause a
        # recursive loop when executing, check for it
        reqdec = ""
        if "require" in data:
            reqdec = "require"
        if "watch" in data:
            # Check to see if the service has a mod_watch function, if it does
            # not, then just require
            # to just require extend the require statement with the contents
            # of watch so that the mod_watch function is not called and the
            # requisite capability is still used
            if "{}.mod_watch".format(data["state"]) not in self.states:
                if "require" in data:
                    data["require"].extend(data.pop("watch"))
                else:
                    data["require"] = data.pop("watch")
                reqdec = "require"
            else:
                reqdec = "watch"
        if reqdec:
            for req in data[reqdec]:
                reqfirst = next(iter(req))
                if data["state"] == reqfirst:
                    # Same-module requisite whose target matches this chunk's
                    # own name or id => self-reference.
                    if fnmatch.fnmatch(data["name"], req[reqfirst]) or fnmatch.fnmatch(
                        data["__id__"], req[reqfirst]
                    ):
                        errors.append(
                            "Recursive require detected in SLS {} for "
                            "require {} in ID {}".format(
                                data["__sls__"], req, data["__id__"]
                            )
                        )
        return errors
    def verify_high(self, high):
        """
        Verify that the high data is viable and follows the data structure.

        Walks every ID declaration in ``high`` and checks structural rules:
        IDs must be strings, bodies must be dicts, each state declaration
        must be a list containing exactly one function name, requisite
        arguments must be lists of single-key dicts, and globally recursive
        requisites are detected via the ``reqs`` bookkeeping dict.

        Returns a list of error strings; an empty list means the high data
        passed verification.
        """
        errors = []
        if not isinstance(high, dict):
            errors.append("High data is not a dictionary and is invalid")
        # Maps ID -> OrderedDict(state=..., <req_val>=<req_key>) so that
        # mutual requisites between two IDs can be detected below.
        reqs = OrderedDict()
        for name, body in high.items():
            try:
                if name.startswith("__"):
                    continue
            except AttributeError:
                # Non-string ID; reported just below by the isinstance check
                pass
            if not isinstance(name, str):
                errors.append(
                    "ID '{}' in SLS '{}' is not formed as a string, but "
                    "is a {}. It may need to be quoted.".format(
                        name, body["__sls__"], type(name).__name__
                    )
                )
            if not isinstance(body, dict):
                err = "The type {} in {} is not formatted as a dictionary".format(
                    name, body
                )
                errors.append(err)
                continue
            for state in body:
                if state.startswith("__"):
                    continue
                if body[state] is None:
                    errors.append(
                        "ID '{}' in SLS '{}' contains a short declaration "
                        "({}) with a trailing colon. When not passing any "
                        "arguments to a state, the colon must be omitted.".format(
                            name, body["__sls__"], state
                        )
                    )
                    continue
                if not isinstance(body[state], list):
                    errors.append(
                        "State '{}' in SLS '{}' is not formed as a list".format(
                            name, body["__sls__"]
                        )
                    )
                else:
                    # Count how many function names this declaration carries;
                    # exactly one is valid ("state.fun" in the key counts).
                    fun = 0
                    if "." in state:
                        fun += 1
                    for arg in body[state]:
                        if isinstance(arg, str):
                            fun += 1
                            if " " in arg.strip():
                                errors.append(
                                    'The function "{}" in state "{}" in SLS "{}" has '
                                    "whitespace, a function with whitespace is not "
                                    "supported, perhaps this is an argument that is "
                                    'missing a ":"'.format(arg, name, body["__sls__"])
                                )
                        elif isinstance(arg, dict):
                            # The arg is a dict, if the arg is require or
                            # watch, it must be a list.
                            #
                            # Add the requires to the reqs dict and check them
                            # all for recursive requisites.
                            argfirst = next(iter(arg))
                            if argfirst == "names":
                                if not isinstance(arg[argfirst], list):
                                    errors.append(
                                        "The 'names' argument in state "
                                        "'{}' in SLS '{}' needs to be "
                                        "formed as a list".format(name, body["__sls__"])
                                    )
                            if argfirst in ("require", "watch", "prereq", "onchanges"):
                                if not isinstance(arg[argfirst], list):
                                    errors.append(
                                        "The {} statement in state '{}' in "
                                        "SLS '{}' needs to be formed as a "
                                        "list".format(argfirst, name, body["__sls__"])
                                    )
                                # It is a list, verify that the members of the
                                # list are all single key dicts.
                                else:
                                    reqs[name] = OrderedDict(state=state)
                                    for req in arg[argfirst]:
                                        if isinstance(req, str):
                                            # Bare string requisites refer to an ID
                                            req = {"id": req}
                                        if not isinstance(req, dict):
                                            errors.append(
                                                "Requisite declaration {} in SLS {} is"
                                                " not formed as a single key dictionary".format(
                                                    req, body["__sls__"]
                                                )
                                            )
                                            continue
                                        req_key = next(iter(req))
                                        req_val = req[req_key]
                                        if "." in req_key:
                                            errors.append(
                                                "Invalid requisite type '{}' "
                                                "in state '{}', in SLS "
                                                "'{}'. Requisite types must "
                                                "not contain dots, did you "
                                                "mean '{}'?".format(
                                                    req_key,
                                                    name,
                                                    body["__sls__"],
                                                    req_key[: req_key.find(".")],
                                                )
                                            )
                                        if not ishashable(req_val):
                                            errors.append(
                                                'Illegal requisite "{}", please check '
                                                "your syntax.\n".format(req_val)
                                            )
                                            continue
                                        # Check for global recursive requisites
                                        reqs[name][req_val] = req_key
                                        # I am going beyond 80 chars on
                                        # purpose, this is just too much
                                        # of a pain to deal with otherwise
                                        if req_val in reqs:
                                            if name in reqs[req_val]:
                                                if reqs[req_val][name] == state:
                                                    if (
                                                        reqs[req_val]["state"]
                                                        == reqs[name][req_val]
                                                    ):
                                                        errors.append(
                                                            "A recursive requisite was"
                                                            ' found, SLS "{}" ID "{}"'
                                                            ' ID "{}"'.format(
                                                                body["__sls__"],
                                                                name,
                                                                req_val,
                                                            )
                                                        )
                                # Make sure that there is only one key in the
                                # dict
                                if len(list(arg)) != 1:
                                    errors.append(
                                        "Multiple dictionaries defined in "
                                        "argument of state '{}' in SLS '{}'".format(
                                            name, body["__sls__"]
                                        )
                                    )
                    if not fun:
                        if state == "require" or state == "watch":
                            continue
                        errors.append(
                            "No function declared in state '{}' in SLS '{}'".format(
                                state, body["__sls__"]
                            )
                        )
                    elif fun > 1:
                        errors.append(
                            "Too many functions declared in state '{}' in "
                            "SLS '{}'".format(state, body["__sls__"])
                        )
        return errors
def verify_chunks(self, chunks):
"""
Verify the chunks in a list of low data structures
"""
err = []
for chunk in chunks:
err.extend(self.verify_data(chunk))
return err
def order_chunks(self, chunks):
"""
Sort the chunk list verifying that the chunks follow the order
specified in the order options.
"""
cap = 1
for chunk in chunks:
if "order" in chunk:
if not isinstance(chunk["order"], int):
continue
chunk_order = chunk["order"]
if chunk_order > cap - 1 and chunk_order > 0:
cap = chunk_order + 100
for chunk in chunks:
if "order" not in chunk:
chunk["order"] = cap
continue
if not isinstance(chunk["order"], (int, float)):
if chunk["order"] == "last":
chunk["order"] = cap + 1000000
elif chunk["order"] == "first":
chunk["order"] = 0
else:
chunk["order"] = cap
if "name_order" in chunk:
chunk["order"] = chunk["order"] + chunk.pop("name_order") / 10000.0
if chunk["order"] < 0:
chunk["order"] = cap + 1000000 + chunk["order"]
chunks.sort(
key=lambda chunk: (
chunk["order"],
"{0[state]}{0[name]}{0[fun]}".format(chunk),
)
)
return chunks
    def compile_high_data(self, high, orchestration_jid=None):
        """
        "Compile" the high data as it is retrieved from the CLI or YAML into
        the individual state executor structures.

        Each ``ID -> state -> [args]`` declaration in ``high`` is flattened
        into one low chunk per function, expanding a ``names`` list into one
        chunk per name (tagged with ``name_order``). The resulting chunks
        are passed through ``order_chunks`` before being returned.

        :param high: the high data dictionary
        :param orchestration_jid: if given, stamped on every chunk as
            ``__orchestration_jid__``
        :return: an ordered list of low chunk dictionaries
        """
        chunks = []
        for name, body in high.items():
            if name.startswith("__"):
                continue
            for state, run in body.items():
                funcs = set()
                names = []
                if state.startswith("__"):
                    continue
                chunk = OrderedDict()
                chunk["state"] = state
                chunk["name"] = name
                if orchestration_jid is not None:
                    chunk["__orchestration_jid__"] = orchestration_jid
                if "__sls__" in body:
                    chunk["__sls__"] = body["__sls__"]
                if "__env__" in body:
                    chunk["__env__"] = body["__env__"]
                chunk["__id__"] = name
                for arg in run:
                    if isinstance(arg, str):
                        # A bare string is the function name
                        funcs.add(arg)
                        continue
                    if isinstance(arg, dict):
                        for key, val in arg.items():
                            if key == "names":
                                for _name in val:
                                    if _name not in names:
                                        names.append(_name)
                            elif key == "state":
                                # Don't pass down a state override
                                continue
                            elif key == "name" and not isinstance(val, str):
                                # Invalid name, fall back to ID
                                chunk[key] = name
                            else:
                                chunk[key] = val
                if names:
                    # Expand the names list into one chunk per entry,
                    # preserving declaration order via name_order.
                    name_order = 1
                    for entry in names:
                        live = copy.deepcopy(chunk)
                        if isinstance(entry, dict):
                            low_name = next(iter(entry.keys()))
                            live["name"] = low_name
                            list(map(live.update, entry[low_name]))
                        else:
                            live["name"] = entry
                        live["name_order"] = name_order
                        name_order += 1
                        for fun in funcs:
                            live["fun"] = fun
                            chunks.append(live)
                else:
                    live = copy.deepcopy(chunk)
                    for fun in funcs:
                        live["fun"] = fun
                        chunks.append(live)
        chunks = self.order_chunks(chunks)
        return chunks
    def reconcile_extend(self, high):
        """
        Pull the extend data and add it to the respective high data.

        Pops ``__extend__`` from ``high`` and merges each extend chunk into
        the matching ID declaration in place: new state keys are added
        whole, requisite lists are extended, and other matching option
        dicts are replaced.

        :param high: the high data dictionary (mutated in place)
        :return: tuple of ``(high, errors)`` where errors is a list of
            strings for extends that could not be resolved
        """
        errors = []
        if "__extend__" not in high:
            return high, errors
        ext = high.pop("__extend__")
        for ext_chunk in ext:
            for name, body in ext_chunk.items():
                state_type = next(x for x in body if not x.startswith("__"))
                if name not in high or state_type not in high[name]:
                    # Check for a matching 'name' override in high data
                    ids = find_name(name, state_type, high)
                    if len(ids) != 1:
                        errors.append(
                            "Cannot extend ID '{0}' in '{1}:{2}'. It is not "
                            "part of the high state.\n"
                            "This is likely due to a missing include statement "
                            "or an incorrectly typed ID.\nEnsure that a "
                            "state with an ID of '{0}' is available\nin "
                            "environment '{1}' and to SLS '{2}'".format(
                                name,
                                body.get("__env__", "base"),
                                body.get("__sls__", "base"),
                            )
                        )
                        continue
                    else:
                        # Resolve the extend target through its name match
                        name = ids[0][0]
                for state, run in body.items():
                    if state.startswith("__"):
                        continue
                    if state not in high[name]:
                        high[name][state] = run
                        continue
                    # high[name][state] is extended by run, both are lists
                    for arg in run:
                        update = False
                        for hind, val in enumerate(high[name][state]):
                            if isinstance(arg, str) and isinstance(val, str):
                                # replacing the function, replace the index
                                high[name][state].pop(hind)
                                high[name][state].insert(hind, arg)
                                update = True
                                continue
                            if isinstance(arg, dict) and isinstance(val, dict):
                                # It is an option, make sure the options match
                                argfirst = next(iter(arg))
                                if argfirst == next(iter(high[name][state][hind])):
                                    # If argfirst is a requisite then we must merge
                                    # our requisite with that of the target state
                                    if argfirst in STATE_REQUISITE_KEYWORDS:
                                        high[name][state][hind][argfirst].extend(
                                            arg[argfirst]
                                        )
                                    # otherwise, its not a requisite and we are just extending (replacing)
                                    else:
                                        high[name][state][hind] = arg
                                    update = True
                                if (
                                    argfirst == "name"
                                    and next(iter(high[name][state][hind])) == "names"
                                ):
                                    # If names are overwritten by name use the name
                                    high[name][state][hind] = arg
                        if not update:
                            high[name][state].append(arg)
        return high, errors
def apply_exclude(self, high):
"""
Read in the __exclude__ list and remove all excluded objects from the
high data
"""
if "__exclude__" not in high:
return high
ex_sls = set()
ex_id = set()
exclude = high.pop("__exclude__")
for exc in exclude:
if isinstance(exc, str):
# The exclude statement is a string, assume it is an sls
ex_sls.add(exc)
if isinstance(exc, dict):
# Explicitly declared exclude
if len(exc) != 1:
continue
key = next(iter(exc.keys()))
if key == "sls":
ex_sls.add(exc["sls"])
elif key == "id":
ex_id.add(exc["id"])
# Now the excludes have been simplified, use them
if ex_sls:
# There are sls excludes, find the associated ids
for name, body in high.items():
if name.startswith("__"):
continue
sls = body.get("__sls__", "")
if not sls:
continue
for ex_ in ex_sls:
if fnmatch.fnmatch(sls, ex_):
ex_id.add(name)
for id_ in ex_id:
if id_ in high:
high.pop(id_)
return high
    def requisite_in(self, high):
        """
        Extend the data reference with requisite_in arguments.

        Translates ``*_in`` requisites (require_in, watch_in, ...) plus
        ``use``/``use_in``/``prereq``/``prereq_in`` into a generated
        ``__extend__`` list on ``high``, then resolves it through
        ``reconcile_extend``.

        :param high: the high data dictionary (mutated in place)
        :return: tuple of ``(high, errors)``
        """
        # The requisite_in forms that get rewritten onto their targets
        req_in = {
            "require_in",
            "watch_in",
            "onfail_in",
            "onchanges_in",
            "use",
            "use_in",
            "prereq",
            "prereq_in",
        }
        # Keys skipped when copying args for use/use_in
        req_in_all = req_in.union(
            {"require", "watch", "onfail", "onfail_stop", "onchanges"}
        )
        extend = {}
        errors = []
        disabled_reqs = self.opts.get("disabled_requisites", [])
        if not isinstance(disabled_reqs, list):
            disabled_reqs = [disabled_reqs]
        for id_, body in high.items():
            if not isinstance(body, dict):
                continue
            for state, run in body.items():
                if state.startswith("__"):
                    continue
                for arg in run:
                    if isinstance(arg, dict):
                        # It is not a function, verify that the arg is a
                        # requisite in statement
                        if len(arg) < 1:
                            # Empty arg dict
                            # How did we get this far?
                            continue
                        # Split out the components
                        key = next(iter(arg))
                        if key not in req_in:
                            continue
                        if key in disabled_reqs:
                            log.warning(
                                "The %s requisite has been disabled, Ignoring.", key
                            )
                            continue
                        # e.g. "require_in" -> "require"
                        rkey = key.split("_")[0]
                        items = arg[key]
                        if isinstance(items, dict):
                            # Formatted as a single req_in
                            for _state, name in items.items():
                                # Not a use requisite_in
                                found = False
                                if name not in extend:
                                    extend[name] = OrderedDict()
                                if "." in _state:
                                    errors.append(
                                        "Invalid requisite in {}: {} for "
                                        "{}, in SLS '{}'. Requisites must "
                                        "not contain dots, did you mean '{}'?".format(
                                            rkey,
                                            _state,
                                            name,
                                            body["__sls__"],
                                            _state[: _state.find(".")],
                                        )
                                    )
                                    _state = _state.split(".")[0]
                                if _state not in extend[name]:
                                    extend[name][_state] = []
                                extend[name]["__env__"] = body["__env__"]
                                extend[name]["__sls__"] = body["__sls__"]
                                for ind in range(len(extend[name][_state])):
                                    if next(iter(extend[name][_state][ind])) == rkey:
                                        # Extending again
                                        extend[name][_state][ind][rkey].append(
                                            {state: id_}
                                        )
                                        found = True
                                if found:
                                    continue
                                # The rkey is not present yet, create it
                                extend[name][_state].append({rkey: [{state: id_}]})
                        if isinstance(items, list):
                            # Formed as a list of requisite additions
                            hinges = []
                            for ind in items:
                                if not isinstance(ind, dict):
                                    # Malformed req_in
                                    if ind in high:
                                        _ind_high = [
                                            x
                                            for x in high[ind]
                                            if not x.startswith("__")
                                        ]
                                        ind = {_ind_high[0]: ind}
                                    else:
                                        # Fall back to resolving by name match
                                        found = False
                                        for _id in iter(high):
                                            for state in [
                                                state
                                                for state in iter(high[_id])
                                                if not state.startswith("__")
                                            ]:
                                                for j in iter(high[_id][state]):
                                                    if (
                                                        isinstance(j, dict)
                                                        and "name" in j
                                                    ):
                                                        if j["name"] == ind:
                                                            ind = {state: _id}
                                                            found = True
                                        if not found:
                                            continue
                                if len(ind) < 1:
                                    continue
                                pstate = next(iter(ind))
                                pname = ind[pstate]
                                if pstate == "sls":
                                    # Expand hinges here
                                    hinges = find_sls_ids(pname, high)
                                else:
                                    hinges.append((pname, pstate))
                                if "." in pstate:
                                    errors.append(
                                        "Invalid requisite in {}: {} for "
                                        "{}, in SLS '{}'. Requisites must "
                                        "not contain dots, did you mean '{}'?".format(
                                            rkey,
                                            pstate,
                                            pname,
                                            body["__sls__"],
                                            pstate[: pstate.find(".")],
                                        )
                                    )
                                    pstate = pstate.split(".")[0]
                                for tup in hinges:
                                    name, _state = tup
                                    if key == "prereq_in":
                                        # Add prerequired to origin
                                        if id_ not in extend:
                                            extend[id_] = OrderedDict()
                                        if state not in extend[id_]:
                                            extend[id_][state] = []
                                        extend[id_][state].append(
                                            {"prerequired": [{_state: name}]}
                                        )
                                    if key == "prereq":
                                        # Add prerequired to prereqs
                                        ext_ids = find_name(name, _state, high)
                                        for ext_id, _req_state in ext_ids:
                                            if ext_id not in extend:
                                                extend[ext_id] = OrderedDict()
                                            if _req_state not in extend[ext_id]:
                                                extend[ext_id][_req_state] = []
                                            extend[ext_id][_req_state].append(
                                                {"prerequired": [{state: id_}]}
                                            )
                                        continue
                                    if key == "use_in":
                                        # Add the running states args to the
                                        # use_in states
                                        ext_ids = find_name(name, _state, high)
                                        for ext_id, _req_state in ext_ids:
                                            if not ext_id:
                                                continue
                                            ext_args = state_args(ext_id, _state, high)
                                            if ext_id not in extend:
                                                extend[ext_id] = OrderedDict()
                                            if _req_state not in extend[ext_id]:
                                                extend[ext_id][_req_state] = []
                                            ignore_args = req_in_all.union(ext_args)
                                            for arg in high[id_][state]:
                                                if not isinstance(arg, dict):
                                                    continue
                                                if len(arg) != 1:
                                                    continue
                                                if next(iter(arg)) in ignore_args:
                                                    continue
                                                # Don't use name or names
                                                if next(iter(arg.keys())) == "name":
                                                    continue
                                                if next(iter(arg.keys())) == "names":
                                                    continue
                                                extend[ext_id][_req_state].append(arg)
                                        continue
                                    if key == "use":
                                        # Add the use state's args to the
                                        # running state
                                        ext_ids = find_name(name, _state, high)
                                        for ext_id, _req_state in ext_ids:
                                            if not ext_id:
                                                continue
                                            loc_args = state_args(id_, state, high)
                                            if id_ not in extend:
                                                extend[id_] = OrderedDict()
                                            if state not in extend[id_]:
                                                extend[id_][state] = []
                                            ignore_args = req_in_all.union(loc_args)
                                            for arg in high[ext_id][_req_state]:
                                                if not isinstance(arg, dict):
                                                    continue
                                                if len(arg) != 1:
                                                    continue
                                                if next(iter(arg)) in ignore_args:
                                                    continue
                                                # Don't use name or names
                                                if next(iter(arg.keys())) == "name":
                                                    continue
                                                if next(iter(arg.keys())) == "names":
                                                    continue
                                                extend[id_][state].append(arg)
                                        continue
                                    found = False
                                    if name not in extend:
                                        extend[name] = OrderedDict()
                                    if _state not in extend[name]:
                                        extend[name][_state] = []
                                    extend[name]["__env__"] = body["__env__"]
                                    extend[name]["__sls__"] = body["__sls__"]
                                    for ind in range(len(extend[name][_state])):
                                        if (
                                            next(iter(extend[name][_state][ind]))
                                            == rkey
                                        ):
                                            # Extending again
                                            extend[name][_state][ind][rkey].append(
                                                {state: id_}
                                            )
                                            found = True
                                    if found:
                                        continue
                                    # The rkey is not present yet, create it
                                    extend[name][_state].append({rkey: [{state: id_}]})
        # Fold the generated extends into high and resolve them
        high["__extend__"] = []
        for key, val in extend.items():
            high["__extend__"].append({key: val})
        req_in_high, req_in_errors = self.reconcile_extend(high)
        errors.extend(req_in_errors)
        return req_in_high, errors
    def _call_parallel_target(self, name, cdata, low):
        """
        The target function to call that will create the parallel thread/process.

        Executes the state function described by ``cdata`` inside the child
        process, records the wall-clock duration, and serializes the return
        dict with msgpack into ``<cachedir>/<jid>/<sha1(tag)>`` so the
        parent can collect it in ``reconcile_procs``.

        :param name: display name used if the state raises
        :param cdata: the formatted call data (``full``, ``args``, ``kwargs``)
        :param low: the low chunk being executed
        """
        # we need to re-record start/end duration here because it is impossible to
        # correctly calculate further down the chain
        utc_start_time = datetime.datetime.utcnow()
        # Resolve any __slot__ references before calling the state
        self.format_slots(cdata)
        tag = _gen_tag(low)
        try:
            ret = self.states[cdata["full"]](*cdata["args"], **cdata["kwargs"])
        except Exception as exc:  # pylint: disable=broad-except
            log.debug(
                "An exception occurred in this state: %s",
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            trb = traceback.format_exc()
            ret = {
                "result": False,
                "name": name,
                "changes": {},
                "comment": "An exception occurred in this state: {}".format(trb),
            }
        utc_finish_time = datetime.datetime.utcnow()
        delta = utc_finish_time - utc_start_time
        # duration in milliseconds.microseconds
        duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
        ret["duration"] = duration
        troot = os.path.join(self.opts["cachedir"], self.jid)
        tfile = os.path.join(troot, salt.utils.hashutils.sha1_digest(tag))
        if not os.path.isdir(troot):
            try:
                os.makedirs(troot)
            except OSError:
                # Looks like the directory was created between the check
                # and the attempt, we are safe to pass
                pass
        with salt.utils.files.fopen(tfile, "wb+") as fp_:
            fp_.write(msgpack_serialize(ret))
def call_parallel(self, cdata, low):
"""
Call the state defined in the given cdata in parallel
"""
# There are a number of possibilities to not have the cdata
# populated with what we might have expected, so just be smart
# enough to not raise another KeyError as the name is easily
# guessable and fallback in all cases to present the real
# exception to the user
name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
if not name:
name = low.get("name", low.get("__id__"))
proc = salt.utils.process.Process(
target=self._call_parallel_target, args=(name, cdata, low)
)
proc.start()
ret = {
"name": name,
"result": None,
"changes": {},
"comment": "Started in a separate process",
"proc": proc,
}
return ret
    @salt.utils.decorators.state.OutputUnifier("content_check", "unify")
    def call(self, low, chunks=None, running=None, retries=1):
        """
        Call a state directly with the low data structure, verify data
        before processing.

        :param low: the low chunk to execute
        :param chunks: the full low chunk list, frozen into ``__lowstate__``
        :param running: the running dict, frozen into ``__running__``
        :param retries: current attempt number, used by the ``retry`` logic
        :return: the state return dict (or whatever a parallel/mocked call
            produced when it is not a dict)
        """
        utc_start_time = datetime.datetime.utcnow()
        # Convert to local time by subtracting the current UTC offset
        local_start_time = utc_start_time - (
            datetime.datetime.utcnow() - datetime.datetime.now()
        )
        log.info(
            "Running state [%s] at time %s",
            low["name"].strip() if isinstance(low["name"], str) else low["name"],
            local_start_time.time().isoformat(),
        )
        errors = self.verify_data(low)
        if errors:
            # Invalid chunk: report the verification errors and bail early
            ret = {
                "result": False,
                "name": low["name"],
                "changes": {},
                "comment": "",
            }
            for err in errors:
                ret["comment"] += "{}\n".format(err)
            ret["__run_num__"] = self.__run_num
            self.__run_num += 1
            format_log(ret)
            self.check_refresh(low, ret)
            return ret
        else:
            ret = {"result": False, "name": low["name"], "changes": {}}
        self.state_con["runas"] = low.get("runas", None)
        if low["state"] == "cmd" and "password" in low:
            self.state_con["runas_password"] = low["password"]
        else:
            self.state_con["runas_password"] = low.get("runas_password", None)
        if not low.get("__prereq__"):
            log.info(
                "Executing state %s.%s for [%s]",
                low["state"],
                low["fun"],
                low["name"].strip() if isinstance(low["name"], str) else low["name"],
            )
        if "provider" in low:
            self.load_modules(low)
        state_func_name = "{0[state]}.{0[fun]}".format(low)
        cdata = salt.utils.args.format_call(
            self.states[state_func_name],
            low,
            initial_ret={"full": state_func_name},
            expected_extra_kws=STATE_INTERNAL_KEYWORDS,
        )
        inject_globals = {
            # Pass a copy of the running dictionary, the low state chunks and
            # the current state dictionaries.
            # We pass deep copies here because we don't want any misbehaving
            # state module to change these at runtime.
            "__low__": immutabletypes.freeze(low),
            "__running__": immutabletypes.freeze(running) if running else {},
            "__instance_id__": self.instance_id,
            "__lowstate__": immutabletypes.freeze(chunks) if chunks else {},
        }
        if "__env__" in low:
            inject_globals["__env__"] = str(low["__env__"])
        if self.inject_globals:
            inject_globals.update(self.inject_globals)
        if low.get("__prereq__"):
            # Force test mode for prereq runs; restored in the finally below
            test = sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]
            sys.modules[self.states[cdata["full"]].__module__].__opts__["test"] = True
        try:
            # Let's get a reference to the salt environment to use within this
            # state call.
            #
            # If the state function accepts an 'env' keyword argument, it
            # allows the state to be overridden(we look for that in cdata). If
            # that's not found in cdata, we look for what we're being passed in
            # the original data, namely, the special dunder __env__. If that's
            # not found we default to 'base'
            req_list = ("unless", "onlyif", "creates")
            if (
                any(req in low for req in req_list)
                and "{0[state]}.mod_run_check".format(low) not in self.states
            ):
                ret.update(self._run_check(low))
            if not self.opts.get("lock_saltenv", False):
                # NOTE: Overriding the saltenv when lock_saltenv is blocked in
                # salt/modules/state.py, before we ever get here, but this
                # additional check keeps use of the State class outside of the
                # salt/modules/state.py from getting around this setting.
                if "saltenv" in low:
                    inject_globals["__env__"] = str(low["saltenv"])
                elif isinstance(cdata["kwargs"].get("env", None), str):
                    # User is using a deprecated env setting which was parsed by
                    # format_call.
                    # We check for a string type since module functions which
                    # allow setting the OS environ also make use of the "env"
                    # keyword argument, which is not a string
                    inject_globals["__env__"] = str(cdata["kwargs"]["env"])
            if "__env__" not in inject_globals:
                # Let's use the default environment
                inject_globals["__env__"] = "base"
            if "__orchestration_jid__" in low:
                inject_globals["__orchestration_jid__"] = low["__orchestration_jid__"]
            if "result" not in ret or ret["result"] is False:
                # _run_check did not short-circuit; actually run the state
                self.states.inject_globals = inject_globals
                if self.mocked:
                    ret = mock_ret(cdata)
                else:
                    # Execute the state function
                    if not low.get("__prereq__") and low.get("parallel"):
                        # run the state call in parallel, but only if not in a prereq
                        ret = self.call_parallel(cdata, low)
                    else:
                        self.format_slots(cdata)
                        if cdata["full"].split(".")[-1] == "__call__":
                            # __call__ requires OrderedDict to preserve state order
                            # kwargs are also invalid overall
                            ret = self.states[cdata["full"]](
                                cdata["args"], module=None, state=cdata["kwargs"]
                            )
                        else:
                            ret = self.states[cdata["full"]](
                                *cdata["args"], **cdata["kwargs"]
                            )
                self.states.inject_globals = {}
            if (
                "check_cmd" in low
                and "{0[state]}.mod_run_check_cmd".format(low) not in self.states
            ):
                ret.update(self._run_check_cmd(low))
        except Exception as exc:  # pylint: disable=broad-except
            log.debug(
                "An exception occurred in this state: %s",
                exc,
                exc_info_on_loglevel=logging.DEBUG,
            )
            trb = traceback.format_exc()
            # There are a number of possibilities to not have the cdata
            # populated with what we might have expected, so just be smart
            # enough to not raise another KeyError as the name is easily
            # guessable and fallback in all cases to present the real
            # exception to the user
            name = (cdata.get("args") or [None])[0] or cdata["kwargs"].get("name")
            if not name:
                name = low.get("name", low.get("__id__"))
            ret = {
                "result": False,
                "name": name,
                "changes": {},
                "comment": "An exception occurred in this state: {}".format(trb),
            }
        finally:
            if low.get("__prereq__"):
                # Restore the test flag that was forced on above
                sys.modules[self.states[cdata["full"]].__module__].__opts__[
                    "test"
                ] = test
            self.state_con.pop("runas", None)
            self.state_con.pop("runas_password", None)
        if not isinstance(ret, dict):
            return ret
        # If format_call got any warnings, let's show them to the user
        if "warnings" in cdata:
            ret.setdefault("warnings", []).extend(cdata["warnings"])
        if "provider" in low:
            self.load_modules()
        if low.get("__prereq__"):
            low["__prereq__"] = False
            return ret
        ret["__sls__"] = low.get("__sls__")
        ret["__run_num__"] = self.__run_num
        self.__run_num += 1
        format_log(ret)
        self.check_refresh(low, ret)
        utc_finish_time = datetime.datetime.utcnow()
        timezone_delta = datetime.datetime.utcnow() - datetime.datetime.now()
        local_finish_time = utc_finish_time - timezone_delta
        local_start_time = utc_start_time - timezone_delta
        ret["start_time"] = local_start_time.time().isoformat()
        delta = utc_finish_time - utc_start_time
        # duration in milliseconds.microseconds
        duration = (delta.seconds * 1000000 + delta.microseconds) / 1000.0
        ret["duration"] = duration
        ret["__id__"] = low["__id__"]
        log.info(
            "Completed state [%s] at time %s (duration_in_ms=%s)",
            low["name"].strip() if isinstance(low["name"], str) else low["name"],
            local_finish_time.time().isoformat(),
            duration,
        )
        if "retry" in low:
            # Recursively re-run the state until the retry conditions are met
            low["retry"] = self.verify_retry_data(low["retry"])
            if not sys.modules[self.states[cdata["full"]].__module__].__opts__["test"]:
                if low["retry"]["until"] != ret["result"]:
                    if low["retry"]["attempts"] > retries:
                        interval = low["retry"]["interval"]
                        if low["retry"]["splay"] != 0:
                            interval = interval + random.randint(
                                0, low["retry"]["splay"]
                            )
                        log.info(
                            "State result does not match retry until value, "
                            "state will be re-run in %s seconds",
                            interval,
                        )
                        self.functions["test.sleep"](interval)
                        retry_ret = self.call(low, chunks, running, retries=retries + 1)
                        orig_ret = ret
                        ret = retry_ret
                        ret["comment"] = "\n".join(
                            [
                                'Attempt {}: Returned a result of "{}", '
                                'with the following comment: "{}"'.format(
                                    retries, orig_ret["result"], orig_ret["comment"]
                                ),
                                "" if not ret["comment"] else ret["comment"],
                            ]
                        )
                        ret["duration"] = (
                            ret["duration"] + orig_ret["duration"] + (interval * 1000)
                        )
                        if retries == 1:
                            ret["start_time"] = orig_ret["start_time"]
            else:
                # In test mode just describe what the retry would do
                ret["comment"] = " ".join(
                    [
                        "" if not ret["comment"] else str(ret["comment"]),
                        "The state would be retried every {interval} seconds "
                        "(with a splay of up to {splay} seconds) a maximum of "
                        "{attempts} times or until a result of {until} "
                        "is returned".format(**low["retry"]),
                    ]
                )
        return ret
    def __eval_slot(self, slot):
        """
        Evaluate a single ``__slot__:salt:mod.fun(...)`` reference.

        Parses the slot text, calls the named execution module function,
        optionally digs into a dict/list result via a dotted path after the
        closing paren, and appends any ``~``-suffixed text to a string
        result. Returns the original ``slot`` string unchanged whenever the
        slot is malformed or the function is unknown.
        """
        log.debug("Evaluating slot: %s", slot)
        # Expected shape: "__slot__:salt:<function call>"
        fmt = slot.split(":", 2)
        if len(fmt) != 3:
            log.warning("Malformed slot: %s", slot)
            return slot
        if fmt[1] != "salt":
            log.warning("Malformed slot: %s", slot)
            log.warning(
                "Only execution modules are currently supported in slots. This means"
                ' slot should start with "__slot__:salt:"'
            )
            return slot
        fun, args, kwargs = salt.utils.args.parse_function(fmt[2])
        if not fun or fun not in self.functions:
            log.warning("Malformed slot: %s", slot)
            log.warning(
                "Execution module should be specified in a function call format: "
                "test.arg('arg', kw='kwarg')"
            )
            return slot
        log.debug("Calling slot: %s(%s, %s)", fun, args, kwargs)
        slot_return = self.functions[fun](*args, **kwargs)
        # Given input __slot__:salt:test.arg(somekey="value").not.exist ~ /appended
        # slot_text should be __slot...).not.exist
        # append_data should be ~ /appended
        slot_text = fmt[2].split("~")[0]
        append_data = fmt[2].split("~", 1)[1:]
        log.debug("slot_text: %s", slot_text)
        log.debug("append_data: %s", append_data)
        # Support parsing slot dict response
        # return_get should result in a kwargs.nested.dict path by getting
        # everything after first closing paren: )
        return_get = None
        try:
            return_get = slot_text[slot_text.rindex(")") + 1 :]
        except ValueError:
            # No closing paren in slot_text; nothing to traverse
            pass
        if return_get:
            # remove first period
            return_get = return_get.split(".", 1)[1].strip()
            log.debug("Searching slot result %s for %s", slot_return, return_get)
            slot_return = salt.utils.data.traverse_dict_and_list(
                slot_return, return_get, default=None, delimiter="."
            )
        if append_data:
            if isinstance(slot_return, str):
                # Append text to slot string result
                append_data = " ".join(append_data).strip()
                log.debug("appending to slot result: %s", append_data)
                slot_return += append_data
            else:
                log.error("Ignoring slot append, slot result is not a string")
        return slot_return
    def format_slots(self, cdata):
        """
        Read in the arguments from the low level slot syntax to make a last
        minute runtime call to gather relevant data for the specific routine

        Will parse strings, first level of dictionary values, and strings and
        first level dict values inside of lists

        Mutates ``cdata["args"]`` / ``cdata["kwargs"]`` in place, replacing
        any value starting with ``__slot__:`` by the result of
        ``__eval_slot``.
        """
        # __slot__:salt.cmd.run(foo, bar, baz=qux)
        SLOT_TEXT = "__slot__:"
        # Walk positional args (by index) and kwargs (by key) uniformly
        ctx = (("args", enumerate(cdata["args"])), ("kwargs", cdata["kwargs"].items()))
        for atype, avalues in ctx:
            for ind, arg in avalues:
                arg = salt.utils.data.decode(arg, keep=True)
                if isinstance(arg, dict):
                    # Search dictionary values for __slot__:
                    for key, value in arg.items():
                        try:
                            if value.startswith(SLOT_TEXT):
                                log.trace("Slot processsing dict value %s", value)
                                cdata[atype][ind][key] = self.__eval_slot(value)
                        except AttributeError:
                            # Not a string/slot
                            continue
                elif isinstance(arg, list):
                    for idx, listvalue in enumerate(arg):
                        log.trace("Slot processing list value: %s", listvalue)
                        if isinstance(listvalue, dict):
                            # Search dict values in list for __slot__:
                            for key, value in listvalue.items():
                                try:
                                    if value.startswith(SLOT_TEXT):
                                        log.trace(
                                            "Slot processsing nested dict value %s",
                                            value,
                                        )
                                        cdata[atype][ind][idx][key] = self.__eval_slot(
                                            value
                                        )
                                except AttributeError:
                                    # Not a string/slot
                                    continue
                        if isinstance(listvalue, str):
                            # Search strings in a list for __slot__:
                            if listvalue.startswith(SLOT_TEXT):
                                log.trace(
                                    "Slot processsing nested string %s", listvalue
                                )
                                cdata[atype][ind][idx] = self.__eval_slot(listvalue)
                elif isinstance(arg, str) and arg.startswith(SLOT_TEXT):
                    # Search strings for __slot__:
                    log.trace("Slot processsing %s", arg)
                    cdata[atype][ind] = self.__eval_slot(arg)
                else:
                    # Not a slot, skip it
                    continue
def verify_retry_data(self, retry_data):
"""
verifies the specified retry data
"""
retry_defaults = {
"until": True,
"attempts": 2,
"splay": 0,
"interval": 30,
}
expected_data = {
"until": bool,
"attempts": int,
"interval": int,
"splay": int,
}
validated_retry_data = {}
if isinstance(retry_data, dict):
for expected_key, value_type in expected_data.items():
if expected_key in retry_data:
if isinstance(retry_data[expected_key], value_type):
validated_retry_data[expected_key] = retry_data[expected_key]
else:
log.warning(
"An invalid value was passed for the retry %s, "
"using default value '%s'",
expected_key,
retry_defaults[expected_key],
)
validated_retry_data[expected_key] = retry_defaults[
expected_key
]
else:
validated_retry_data[expected_key] = retry_defaults[expected_key]
else:
log.warning(
"State is set to retry, but a valid dict for retry "
"configuration was not found. Using retry defaults"
)
validated_retry_data = retry_defaults
return validated_retry_data
    def call_chunks(self, chunks):
        """
        Iterate over a list of chunks and call them, checking for requires.

        Honors the ``state_runs_disabled`` grain (disabled chunks are removed
        and reported as failures), per-chunk pause/kill requests, and
        failhard signals. Waits for any parallel processes to finish before
        returning.

        :param chunks: ordered list of low chunks
        :return: dict mapping state tags to their return dicts
        """
        # Check for any disabled states
        disabled = {}
        if "state_runs_disabled" in self.opts["grains"]:
            # Iterate over a copy since matching chunks are removed in place
            for low in chunks[:]:
                state_ = "{}.{}".format(low["state"], low["fun"])
                for pat in self.opts["grains"]["state_runs_disabled"]:
                    if fnmatch.fnmatch(state_, pat):
                        comment = (
                            'The state function "{0}" is currently disabled by "{1}", '
                            "to re-enable, run state.enable {1}.".format(
                                state_,
                                pat,
                            )
                        )
                        _tag = _gen_tag(low)
                        disabled[_tag] = {
                            "changes": {},
                            "result": False,
                            "comment": comment,
                            "__run_num__": self.__run_num,
                            "__sls__": low["__sls__"],
                        }
                        self.__run_num += 1
                        chunks.remove(low)
                        break
        running = {}
        for low in chunks:
            if "__FAILHARD__" in running:
                # A previous chunk raised failhard; stop executing here
                running.pop("__FAILHARD__")
                return running
            tag = _gen_tag(low)
            if tag not in running:
                # Check if this low chunk is paused
                action = self.check_pause(low)
                if action == "kill":
                    break
                running = self.call_chunk(low, running, chunks)
                if self.check_failhard(low, running):
                    return running
            self.active = set()
        # Wait for any still-running parallel processes to be collected
        while True:
            if self.reconcile_procs(running):
                break
            time.sleep(0.01)
        ret = dict(list(disabled.items()) + list(running.items()))
        return ret
def check_failhard(self, low, running):
"""
Check if the low data chunk should send a failhard signal
"""
tag = _gen_tag(low)
if self.opts.get("test", False):
return False
if low.get("failhard", self.opts["failhard"]) and tag in running:
if running[tag]["result"] is None:
return False
return not running[tag]["result"]
return False
def check_pause(self, low):
"""
Check to see if this low chunk has been paused
"""
if not self.jid:
# Can't pause on salt-ssh since we can't track continuous state
return
pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
start = time.time()
if os.path.isfile(pause_path):
try:
while True:
tries = 0
with salt.utils.files.fopen(pause_path, "rb") as fp_:
try:
pdat = msgpack_deserialize(fp_.read())
except salt.utils.msgpack.exceptions.UnpackValueError:
# Reading race condition
if tries > 10:
# Break out if there are a ton of read errors
return
tries += 1
time.sleep(1)
continue
id_ = low["__id__"]
key = ""
if id_ in pdat:
key = id_
elif "__all__" in pdat:
key = "__all__"
if key:
if "duration" in pdat[key]:
now = time.time()
if now - start > pdat[key]["duration"]:
return "run"
if "kill" in pdat[key]:
return "kill"
else:
return "run"
time.sleep(1)
except Exception as exc: # pylint: disable=broad-except
log.error(
"Failed to read in pause data for file located at: %s", pause_path
)
return "run"
return "run"
def reconcile_procs(self, running):
"""
Check the running dict for processes and resolve them
"""
retset = set()
for tag in running:
proc = running[tag].get("proc")
if proc:
if not proc.is_alive():
ret_cache = os.path.join(
self.opts["cachedir"],
self.jid,
salt.utils.hashutils.sha1_digest(tag),
)
if not os.path.isfile(ret_cache):
ret = {
"result": False,
"comment": "Parallel process failed to return",
"name": running[tag]["name"],
"changes": {},
}
try:
with salt.utils.files.fopen(ret_cache, "rb") as fp_:
ret = msgpack_deserialize(fp_.read())
except OSError:
ret = {
"result": False,
"comment": "Parallel cache failure",
"name": running[tag]["name"],
"changes": {},
}
running[tag].update(ret)
running[tag].pop("proc")
else:
retset.add(False)
return False not in retset
def check_requisite(self, low, running, chunks, pre=False):
    """
    Look into the running data to check the status of all requisite
    states

    low
        The low chunk whose requisites are being evaluated.
    running
        Results of already-executed chunks, keyed by tag.
    chunks
        All low chunks in this run, used to resolve requisite targets.
    pre
        When True, also track the "prerequired" requisite type.

    Returns a ``(status, reqs)`` tuple. ``status`` is one of "met",
    "unmet", "fail", "pre", "onfail", "onchanges", or "change".
    ``reqs`` is ``()`` on the early "met"/"unmet" paths, otherwise a
    dict mapping requisite type -> list of matched chunks.
    """
    disabled_reqs = self.opts.get("disabled_requisites", [])
    if not isinstance(disabled_reqs, list):
        disabled_reqs = [disabled_reqs]
    present = False
    # If mod_watch is not available make it a require
    if "watch" in low:
        if "{}.mod_watch".format(low["state"]) not in self.states:
            if "require" in low:
                low["require"].extend(low.pop("watch"))
            else:
                low["require"] = low.pop("watch")
        else:
            present = True
    if "watch_any" in low:
        if "{}.mod_watch".format(low["state"]) not in self.states:
            if "require_any" in low:
                low["require_any"].extend(low.pop("watch_any"))
            else:
                low["require_any"] = low.pop("watch_any")
        else:
            present = True
    # Any of the following requisite keywords means there is work to do.
    if "require" in low:
        present = True
    if "require_any" in low:
        present = True
    if "prerequired" in low:
        present = True
    if "prereq" in low:
        present = True
    if "onfail" in low:
        present = True
    if "onfail_any" in low:
        present = True
    if "onfail_all" in low:
        present = True
    if "onchanges" in low:
        present = True
    if "onchanges_any" in low:
        present = True
    if not present:
        # No requisites at all: trivially met.
        return "met", ()
    self.reconcile_procs(running)
    reqs = {
        "require": [],
        "require_any": [],
        "watch": [],
        "watch_any": [],
        "prereq": [],
        "onfail": [],
        "onfail_any": [],
        "onfail_all": [],
        "onchanges": [],
        "onchanges_any": [],
    }
    if pre:
        reqs["prerequired"] = []
    # Resolve each requisite reference against the list of chunks.
    for r_state in reqs:
        if r_state in low and low[r_state] is not None:
            if r_state in disabled_reqs:
                log.warning(
                    "The %s requisite has been disabled, Ignoring.", r_state
                )
                continue
            for req in low[r_state]:
                if isinstance(req, str):
                    req = {"id": req}
                req = trim_req(req)
                found = False
                for chunk in chunks:
                    req_key = next(iter(req))
                    req_val = req[req_key]
                    if req_val is None:
                        continue
                    if req_key == "sls":
                        # Allow requisite tracking of entire sls files
                        if fnmatch.fnmatch(chunk["__sls__"], req_val):
                            found = True
                            reqs[r_state].append(chunk)
                        continue
                    try:
                        if isinstance(req_val, str):
                            # Match against both the chunk name and id.
                            if fnmatch.fnmatch(
                                chunk["name"], req_val
                            ) or fnmatch.fnmatch(chunk["__id__"], req_val):
                                if req_key == "id" or chunk["state"] == req_key:
                                    found = True
                                    reqs[r_state].append(chunk)
                        else:
                            raise KeyError
                    except KeyError as exc:
                        raise SaltRenderError(
                            "Could not locate requisite of [{}] present in state"
                            " with name [{}]".format(req_key, chunk["name"])
                        )
                    except TypeError:
                        # On Python 2, the above req_val, being an OrderedDict, will raise a KeyError,
                        # however on Python 3 it will raise a TypeError
                        # This was found when running tests.unit.test_state.StateCompilerTestCase.test_render_error_on_invalid_requisite
                        raise SaltRenderError(
                            "Could not locate requisite of [{}] present in state"
                            " with name [{}]".format(req_key, chunk["name"])
                        )
                if not found:
                    return "unmet", ()
    # Evaluate the result of each matched requisite chunk.
    fun_stats = set()
    for r_state, chunks in reqs.items():
        req_stats = set()
        # prereq (but not prerequired) results live in self.pre.
        if r_state.startswith("prereq") and not r_state.startswith("prerequired"):
            run_dict = self.pre
        else:
            run_dict = running
        # Only wait on the chunks this requisite actually references.
        filtered_run_dict = {}
        for chunk in chunks:
            tag = _gen_tag(chunk)
            run_dict_chunk = run_dict.get(tag)
            if run_dict_chunk:
                filtered_run_dict[tag] = run_dict_chunk
        run_dict = filtered_run_dict
        while True:
            if self.reconcile_procs(run_dict):
                break
            time.sleep(0.01)
        for chunk in chunks:
            tag = _gen_tag(chunk)
            if tag not in run_dict:
                req_stats.add("unmet")
                continue
            if r_state.startswith("onfail"):
                if run_dict[tag]["result"] is True:
                    req_stats.add("onfail")  # At least one state is OK
                    continue
            else:
                if run_dict[tag]["result"] is False:
                    req_stats.add("fail")
                    continue
            if r_state.startswith("onchanges"):
                if not run_dict[tag]["changes"]:
                    req_stats.add("onchanges")
                else:
                    req_stats.add("onchangesmet")
                continue
            if r_state.startswith("watch") and run_dict[tag]["changes"]:
                req_stats.add("change")
                continue
            if r_state.startswith("prereq") and run_dict[tag]["result"] is None:
                if not r_state.startswith("prerequired"):
                    req_stats.add("premet")
            if r_state.startswith("prereq") and not run_dict[tag]["result"] is None:
                if not r_state.startswith("prerequired"):
                    req_stats.add("pre")
            else:
                if run_dict[tag].get("__state_ran__", True):
                    req_stats.add("met")
        # For *_any requisites a single success neutralizes failures.
        if r_state.endswith("_any") or r_state == "onfail":
            if "met" in req_stats or "change" in req_stats:
                if "fail" in req_stats:
                    req_stats.remove("fail")
            if "onchangesmet" in req_stats:
                if "onchanges" in req_stats:
                    req_stats.remove("onchanges")
                if "fail" in req_stats:
                    req_stats.remove("fail")
            if "onfail" in req_stats:
                # a met requisite in this case implies a success
                if "met" in req_stats:
                    req_stats.remove("onfail")
        if r_state.endswith("_all"):
            if "onfail" in req_stats:
                # a met requisite in this case implies a failure
                if "met" in req_stats:
                    req_stats.remove("met")
        fun_stats.update(req_stats)
    # Collapse the accumulated per-requisite flags into one status,
    # checked in priority order.
    if "unmet" in fun_stats:
        status = "unmet"
    elif "fail" in fun_stats:
        status = "fail"
    elif "pre" in fun_stats:
        if "premet" in fun_stats:
            status = "met"
        else:
            status = "pre"
    elif "onfail" in fun_stats and "onchangesmet" not in fun_stats:
        status = "onfail"
    elif "onchanges" in fun_stats and "onchangesmet" not in fun_stats:
        status = "onchanges"
    elif "change" in fun_stats:
        status = "change"
    elif "onfail" in fun_stats:
        status = "onfail"
    else:
        status = "met"
    return status, reqs
def event(self, chunk_ret, length, fire_event=False):
    """
    Fire an event on the master bus

    If `fire_event` is set to True an event will be sent with the
    chunk name in the tag and the chunk result in the event data.

    If `fire_event` is set to a string such as `mystate/is/finished`,
    an event will be sent with the string added to the tag and the chunk
    result in the event data.

    If the `state_events` is set to True in the config, then after the
    chunk is evaluated an event will be set up to the master with the
    results.
    """
    if self.opts.get("local"):
        # Local runs never fire master events.
        return
    if not (self.opts.get("state_events", True) or fire_event):
        return
    if self.opts.get("master_uri"):
        fire = self.functions["event.fire_master"]
    else:
        # No master connection configured: fire on the local event bus.
        def fire(ret, tag, preload=None):
            return salt.utils.event.get_master_event(
                self.opts, self.opts["sock_dir"], listen=False
            ).fire_event(ret, tag)

    payload = {"ret": chunk_ret}
    if fire_event is True:
        tag = salt.utils.event.tagify(
            [self.jid, self.opts["id"], str(chunk_ret["name"])],
            "state_result",
        )
    elif isinstance(fire_event, str):
        tag = salt.utils.event.tagify(
            [self.jid, self.opts["id"], str(fire_event)],
            "state_result",
        )
    else:
        # Default: per-chunk progress event.
        tag = salt.utils.event.tagify(
            [self.jid, "prog", self.opts["id"], str(chunk_ret["__run_num__"])],
            "job",
        )
    payload["len"] = length
    fire(payload, tag, preload={"jid": self.jid})
def call_chunk(self, low, running, chunks):
    """
    Check if a chunk has any requires, execute the requires and then
    the chunk

    low
        The low chunk to execute.
    running
        Results of already-executed chunks, keyed by tag; mutated and
        returned.
    chunks
        The full list of low chunks for this run.

    Recursively executes requisite chunks before this one, then
    dispatches on the requisite status returned by check_requisite.
    """
    low = self._mod_aggregate(low, running, chunks)
    self._mod_init(low)
    tag = _gen_tag(low)
    if not low.get("prerequired"):
        # Mark this chunk as in-flight for recursion detection below.
        self.active.add(tag)
    requisites = [
        "require",
        "require_any",
        "watch",
        "watch_any",
        "prereq",
        "onfail",
        "onfail_any",
        "onchanges",
        "onchanges_any",
    ]
    if not low.get("__prereq__"):
        requisites.append("prerequired")
        status, reqs = self.check_requisite(low, running, chunks, pre=True)
    else:
        status, reqs = self.check_requisite(low, running, chunks)
    if status == "unmet":
        # Resolve the requisite chunks that still need to run, and
        # collect any requisite references that match nothing.
        lost = {}
        reqs = []
        for requisite in requisites:
            lost[requisite] = []
            if requisite not in low:
                continue
            for req in low[requisite]:
                if isinstance(req, str):
                    req = {"id": req}
                req = trim_req(req)
                found = False
                req_key = next(iter(req))
                req_val = req[req_key]
                for chunk in chunks:
                    if req_val is None:
                        continue
                    if req_key == "sls":
                        # Allow requisite tracking of entire sls files
                        if fnmatch.fnmatch(chunk["__sls__"], req_val):
                            if requisite == "prereq":
                                chunk["__prereq__"] = True
                            reqs.append(chunk)
                            found = True
                        continue
                    if fnmatch.fnmatch(chunk["name"], req_val) or fnmatch.fnmatch(
                        chunk["__id__"], req_val
                    ):
                        if req_key == "id" or chunk["state"] == req_key:
                            if requisite == "prereq":
                                chunk["__prereq__"] = True
                            elif requisite == "prerequired":
                                chunk["__prerequired__"] = True
                            reqs.append(chunk)
                            found = True
                if not found:
                    lost[requisite].append(req)
        if (
            lost["require"]
            or lost["watch"]
            or lost["prereq"]
            or lost["onfail"]
            or lost["onchanges"]
            or lost["require_any"]
            or lost["watch_any"]
            or lost["onfail_any"]
            or lost["onchanges_any"]
            or lost.get("prerequired")
        ):
            # At least one requisite matched no chunk: record a failure
            # result listing every missing requisite and bail out.
            comment = "The following requisites were not found:\n"
            for requisite, lreqs in lost.items():
                if not lreqs:
                    continue
                comment += "{}{}:\n".format(" " * 19, requisite)
                for lreq in lreqs:
                    req_key = next(iter(lreq))
                    req_val = lreq[req_key]
                    comment += "{}{}: {}\n".format(" " * 23, req_key, req_val)
            if low.get("__prereq__"):
                run_dict = self.pre
            else:
                run_dict = running
            start_time, duration = _calculate_fake_duration()
            run_dict[tag] = {
                "changes": {},
                "result": False,
                "duration": duration,
                "start_time": start_time,
                "comment": comment,
                "__run_num__": self.__run_num,
                "__sls__": low["__sls__"],
            }
            self.__run_num += 1
            self.event(run_dict[tag], len(chunks), fire_event=low.get("fire_event"))
            return running
        for chunk in reqs:
            # Check to see if the chunk has been run, only run it if
            # it has not been run already
            ctag = _gen_tag(chunk)
            if ctag not in running:
                if ctag in self.active:
                    if chunk.get("__prerequired__"):
                        # Prereq recusive, run this chunk with prereq on
                        if tag not in self.pre:
                            low["__prereq__"] = True
                            self.pre[ctag] = self.call(low, chunks, running)
                            return running
                        else:
                            return running
                    elif ctag not in running:
                        # The requisite chunk is active but unfinished:
                        # a requisite loop.
                        log.error("Recursive requisite found")
                        running[tag] = {
                            "changes": {},
                            "result": False,
                            "comment": "Recursive requisite found",
                            "__run_num__": self.__run_num,
                            "__sls__": low["__sls__"],
                        }
                        self.__run_num += 1
                        self.event(
                            running[tag], len(chunks), fire_event=low.get("fire_event")
                        )
                    return running
                # Recurse to execute the requisite chunk first.
                running = self.call_chunk(chunk, running, chunks)
                if self.check_failhard(chunk, running):
                    running["__FAILHARD__"] = True
                    return running
        if low.get("__prereq__"):
            # Re-check requisites now that the prereq targets have run.
            status, reqs = self.check_requisite(low, running, chunks)
            self.pre[tag] = self.call(low, chunks, running)
            if not self.pre[tag]["changes"] and status == "change":
                self.pre[tag]["changes"] = {"watch": "watch"}
                self.pre[tag]["result"] = None
        else:
            running = self.call_chunk(low, running, chunks)
            if self.check_failhard(chunk, running):
                running["__FAILHARD__"] = True
                return running
    elif status == "met":
        if low.get("__prereq__"):
            self.pre[tag] = self.call(low, chunks, running)
        else:
            running[tag] = self.call(low, chunks, running)
    elif status == "fail":
        # if the requisite that failed was due to a prereq on this low state
        # show the normal error
        if tag in self.pre:
            running[tag] = self.pre[tag]
            running[tag]["__run_num__"] = self.__run_num
            running[tag]["__sls__"] = low["__sls__"]
        # otherwise the failure was due to a requisite down the chain
        else:
            # determine what the requisite failures where, and return
            # a nice error message
            failed_requisites = set()
            # look at all requisite types for a failure
            for req_lows in reqs.values():
                for req_low in req_lows:
                    req_tag = _gen_tag(req_low)
                    req_ret = self.pre.get(req_tag, running.get(req_tag))
                    # if there is no run output for the requisite it
                    # can't be the failure
                    if req_ret is None:
                        continue
                    # If the result was False (not None) it was a failure
                    if req_ret["result"] is False:
                        # use SLS.ID for the key-- so its easier to find
                        key = "{sls}.{_id}".format(
                            sls=req_low["__sls__"], _id=req_low["__id__"]
                        )
                        failed_requisites.add(key)
            _cmt = "One or more requisite failed: {}".format(
                ", ".join(str(i) for i in failed_requisites)
            )
            start_time, duration = _calculate_fake_duration()
            running[tag] = {
                "changes": {},
                "result": False,
                "duration": duration,
                "start_time": start_time,
                "comment": _cmt,
                "__run_num__": self.__run_num,
                "__sls__": low["__sls__"],
            }
            self.pre[tag] = running[tag]
        self.__run_num += 1
    elif status == "change" and not low.get("__prereq__"):
        # A watched state changed: run the state, and if it reports no
        # changes of its own, re-run it as mod_watch.
        ret = self.call(low, chunks, running)
        if not ret["changes"] and not ret.get("skip_watch", False):
            low = low.copy()
            low["sfun"] = low["fun"]
            low["fun"] = "mod_watch"
            low["__reqs__"] = reqs
            ret = self.call(low, chunks, running)
        running[tag] = ret
    elif status == "pre":
        # Waiting on a prereq: record a placeholder "no changes" result.
        start_time, duration = _calculate_fake_duration()
        pre_ret = {
            "changes": {},
            "result": True,
            "duration": duration,
            "start_time": start_time,
            "comment": "No changes detected",
            "__run_num__": self.__run_num,
            "__sls__": low["__sls__"],
        }
        running[tag] = pre_ret
        self.pre[tag] = pre_ret
        self.__run_num += 1
    elif status == "onfail":
        start_time, duration = _calculate_fake_duration()
        running[tag] = {
            "changes": {},
            "result": True,
            "duration": duration,
            "start_time": start_time,
            "comment": "State was not run because onfail req did not change",
            "__state_ran__": False,
            "__run_num__": self.__run_num,
            "__sls__": low["__sls__"],
        }
        self.__run_num += 1
    elif status == "onchanges":
        start_time, duration = _calculate_fake_duration()
        running[tag] = {
            "changes": {},
            "result": True,
            "duration": duration,
            "start_time": start_time,
            "comment": (
                "State was not run because none of the onchanges reqs changed"
            ),
            "__state_ran__": False,
            "__run_num__": self.__run_num,
            "__sls__": low["__sls__"],
        }
        self.__run_num += 1
    else:
        if low.get("__prereq__"):
            self.pre[tag] = self.call(low, chunks, running)
        else:
            running[tag] = self.call(low, chunks, running)
    if tag in running:
        self.event(running[tag], len(chunks), fire_event=low.get("fire_event"))
        # Surface any sub-state results the state module produced.
        for sub_state_data in running[tag].pop("sub_state_run", ()):
            start_time, duration = _calculate_fake_duration()
            self.__run_num += 1
            sub_tag = _gen_tag(sub_state_data["low"])
            running[sub_tag] = {
                "name": sub_state_data["low"]["name"],
                "changes": sub_state_data["changes"],
                "result": sub_state_data["result"],
                "duration": sub_state_data.get("duration", duration),
                "start_time": sub_state_data.get("start_time", start_time),
                "comment": sub_state_data.get("comment", ""),
                "__state_ran__": True,
                "__run_num__": self.__run_num,
                "__sls__": low["__sls__"],
            }
    return running
def call_beacons(self, chunks, running):
    """
    Find all of the beacon routines and call the associated mod_beacon runs

    chunks
        All low chunks for this run.
    running
        Accumulated results dict; updated in place with the mod_beacon
        results and returned.
    """
    # Removed unused ``listeners``/``crefs`` locals and a dead ``errors``
    # dict that was iterated but never populated.
    beacons = [chunk for chunk in chunks if "beacon" in chunk]
    mod_beacons = []
    for chunk in beacons:
        # Re-target each beacon chunk at its state's mod_beacon function.
        low = chunk.copy()
        low["sfun"] = chunk["fun"]
        low["fun"] = "mod_beacon"
        low["__id__"] = "beacon_{}".format(low["__id__"])
        mod_beacons.append(low)
    ret = self.call_chunks(mod_beacons)
    running.update(ret)
    return running
def call_listen(self, chunks, running):
    """
    Find all of the listen routines and call the associated mod_watch runs

    chunks
        All low chunks for this run.
    running
        Accumulated results dict; updated in place with mod_watch results
        and any reference errors, then returned.
    """
    listeners = []
    # Map (state, __id__, name) -> chunk, for resolving listen targets.
    crefs = {}
    for chunk in chunks:
        crefs[(chunk["state"], chunk["__id__"], chunk["name"])] = chunk
        if "listen" in chunk:
            listeners.append(
                {(chunk["state"], chunk["__id__"], chunk["name"]): chunk["listen"]}
            )
        if "listen_in" in chunk:
            # listen_in inverts the relationship: the target listens to
            # this chunk. The "lookup" marker fills the name slot.
            for l_in in chunk["listen_in"]:
                for key, val in l_in.items():
                    listeners.append(
                        {(key, val, "lookup"): [{chunk["state"]: chunk["__id__"]}]}
                    )
    mod_watchers = []
    errors = {}
    for l_dict in listeners:
        for key, val in l_dict.items():
            for listen_to in val:
                if not isinstance(listen_to, dict):
                    # Bare string reference: resolve it to {state: id}.
                    found = False
                    for chunk in chunks:
                        if (
                            chunk["__id__"] == listen_to
                            or chunk["name"] == listen_to
                        ):
                            listen_to = {chunk["state"]: chunk["__id__"]}
                            found = True
                    if not found:
                        continue
                for lkey, lval in listen_to.items():
                    if not any(lkey == cref[0] and lval in cref for cref in crefs):
                        rerror = {
                            _l_tag(lkey, lval): {
                                "comment": (
                                    "Referenced state {}: {} does not exist".format(
                                        lkey, lval
                                    )
                                ),
                                "name": "listen_{}:{}".format(lkey, lval),
                                "result": False,
                                "changes": {},
                            }
                        }
                        errors.update(rerror)
                        continue
                    to_tags = [
                        _gen_tag(data)
                        for cref, data in crefs.items()
                        if lkey == cref[0] and lval in cref
                    ]
                    for to_tag in to_tags:
                        if to_tag not in running:
                            continue
                        if running[to_tag]["changes"]:
                            # NOTE(review): from here on the lookups use
                            # ``key`` -- the listener's own (state, id, ...)
                            # tuple from the outer loop -- not the
                            # listened-to ``lkey``/``lval``. It is the
                            # *listener* that gets re-run as mod_watch;
                            # confirm this asymmetry is intended.
                            if not any(
                                key[0] == cref[0] and key[1] in cref
                                for cref in crefs
                            ):
                                rerror = {
                                    _l_tag(key[0], key[1]): {
                                        "comment": (
                                            "Referenced state {}: {} does not exist".format(
                                                key[0], key[1]
                                            )
                                        ),
                                        "name": "listen_{}:{}".format(
                                            key[0], key[1]
                                        ),
                                        "result": False,
                                        "changes": {},
                                    }
                                }
                                errors.update(rerror)
                                continue
                            new_chunks = [
                                data
                                for cref, data in crefs.items()
                                if key[0] == cref[0] and key[1] in cref
                            ]
                            for chunk in new_chunks:
                                # Re-run the listener via mod_watch, with
                                # requisite keywords stripped so it runs
                                # unconditionally.
                                low = chunk.copy()
                                low["sfun"] = chunk["fun"]
                                low["fun"] = "mod_watch"
                                low["__id__"] = "listener_{}".format(low["__id__"])
                                for req in STATE_REQUISITE_KEYWORDS:
                                    if req in low:
                                        low.pop(req)
                                mod_watchers.append(low)
    ret = self.call_chunks(mod_watchers)
    running.update(ret)
    for err in errors:
        errors[err]["__run_num__"] = self.__run_num
        self.__run_num += 1
    running.update(errors)
    return running
def inject_default_call(self, high):
    """
    Sets .call function to a state, if not there.

    :param high: high data structure; mutated in place
    :return: None
    """
    # collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc. Imported locally to avoid touching the module's
    # import block.
    from collections.abc import Mapping

    for chunk in high:
        state = high[chunk]
        if not isinstance(state, Mapping):
            continue
        for state_ref in state:
            needs_default = True
            if not isinstance(state[state_ref], list):
                continue
            for argset in state[state_ref]:
                if isinstance(argset, str):
                    # A bare string already names the function to call.
                    needs_default = False
                    break
            if needs_default:
                # Insert the call marker before the last element.
                state[state_ref].insert(-1, "__call__")
def call_high(self, high, orchestration_jid=None):
    """
    Process a high data call and ensure the defined states.
    """
    self.inject_default_call(high)
    errors = []
    # Reconcile extend declarations, then validate the high data.
    high, ext_errors = self.reconcile_extend(high)
    errors.extend(ext_errors)
    errors.extend(self.verify_high(high))
    if errors:
        return errors
    high, req_in_errors = self.requisite_in(high)
    errors.extend(req_in_errors)
    high = self.apply_exclude(high)
    if errors:
        return errors
    # Compile the verified high data down to ordered low chunks.
    chunks = self.compile_high_data(high, orchestration_jid)
    if errors:
        return errors
    ret = self.call_chunks(chunks)
    ret = self.call_listen(chunks, ret)
    ret = self.call_beacons(chunks, ret)
    # Remove this run's accumulator data file, if any.
    accum_data_path = os.path.join(
        get_accumulator_dir(self.opts["cachedir"]), self.instance_id
    )
    try:
        os.remove(accum_data_path)
        log.debug("Deleted accumulator data file %s", accum_data_path)
    except OSError:
        log.debug("File %s does not exist, no need to cleanup", accum_data_path)
    # Remove any leftover pause file for this jid.
    if self.jid is not None:
        pause_path = os.path.join(self.opts["cachedir"], "state_pause", self.jid)
        if os.path.isfile(pause_path):
            try:
                os.remove(pause_path)
            except OSError:
                # File is not present, all is well
                pass
    return ret
def render_template(self, high, template):
    """
    Normalize one rendered template's high data.

    Returns a ``(high, errors)`` pair where ``errors`` is a list of
    message strings and ``high`` is the (possibly rewritten) structure.
    """
    errors = []
    if not high:
        return high, errors
    if not isinstance(high, dict):
        errors.append(
            "Template {} does not render to a dictionary".format(template)
        )
        return high, errors
    # Single templates may not carry top-level structural declarations.
    for item in ("include", "exclude", "extends"):
        if item in high:
            errors.append(
                "The '{}' declaration found on '{}' is invalid when "
                "rendering single templates".format(item, template)
            )
            return high, errors
    for name in high:
        entry = high[name]
        if not isinstance(entry, dict):
            if isinstance(entry, str) and "." in entry:
                # Short form such as "pkg.installed": pad it out to the
                # full {state: [function]} shape.
                parts = entry.split(".")
                high[name] = {parts[0]: [parts[1]]}
                continue
            errors.append(
                "ID {} in template {} is not a dictionary".format(
                    name, template
                )
            )
            continue
        seen_states = set()
        for key in sorted(high[name]):
            if key.startswith("_"):
                continue
            if high[name][key] is None:
                errors.append(
                    "ID '{}' in template {} contains a short "
                    "declaration ({}) with a trailing colon. When not "
                    "passing any arguments to a state, the colon must be "
                    "omitted.".format(name, template, key)
                )
                continue
            if not isinstance(high[name][key], list):
                continue
            if "." not in key:
                seen_states.add(key)
                continue
            parts = key.split(".")
            # Two declarations of the same state type under one ID
            # (e.g. file.managed plus file.comment) are not supported.
            if parts[0] in seen_states:
                errors.append(
                    "ID '{}' in template '{}' contains multiple "
                    "state declarations of the same type".format(name, template)
                )
                continue
            # Rewrite "state.func: [args]" into "state: [args, func]".
            high[name][parts[0]] = high[name].pop(key)
            high[name][parts[0]].append(parts[1])
            seen_states.add(parts[0])
    return high, errors
def call_template(self, template):
    """
    Enforce the states in a template
    """
    rendered = compile_template(
        template,
        self.rend,
        self.opts["renderer"],
        self.opts["renderer_blacklist"],
        self.opts["renderer_whitelist"],
    )
    if not rendered:
        # Nothing rendered; hand the empty result back unchanged.
        return rendered
    rendered, errors = self.render_template(rendered, template)
    return errors if errors else self.call_high(rendered)
def call_template_str(self, template):
    """
    Enforce the states in a template, pass the template as a string
    """
    rendered = compile_template_str(
        template,
        self.rend,
        self.opts["renderer"],
        self.opts["renderer_blacklist"],
        self.opts["renderer_whitelist"],
    )
    if not rendered:
        # Nothing rendered; hand the empty result back unchanged.
        return rendered
    rendered, errors = self.render_template(rendered, "<template-str>")
    return errors if errors else self.call_high(rendered)
class LazyAvailStates:
    """
    The LazyAvailStates lazily loads the list of states of available
    environments.

    This is particularly useful when top_file_merging_strategy=same and there
    are many environments.
    """

    def __init__(self, hs):
        # The owning highstate object; supplies _get_envs() and a
        # fileclient with list_states().
        self._hs = hs
        # saltenv -> cached list of states (None = not fetched yet).
        self._avail = {"base": None}
        self._filled = False

    def _fill(self):
        """Populate the environment map once, on first non-base access."""
        if self._filled:
            return
        for saltenv in self._hs._get_envs():
            self._avail.setdefault(saltenv, None)
        self._filled = True

    def __contains__(self, saltenv):
        # "base" is always considered present, without probing the envs.
        if saltenv == "base":
            return True
        self._fill()
        return saltenv in self._avail

    def __getitem__(self, saltenv):
        if saltenv != "base":
            self._fill()
        if self._avail[saltenv] is None:
            # Fetch and cache the state list on first access.
            self._avail[saltenv] = self._hs.client.list_states(saltenv)
        return self._avail[saltenv]

    def items(self):
        self._fill()
        # Route through __getitem__ so each env's list gets fetched.
        return [(saltenv, self[saltenv]) for saltenv in self._avail]
class BaseHighState:
"""
The BaseHighState is an abstract base class that is the foundation of
running a highstate, extend it and add a self.state object of type State.
When extending this class, please note that ``self.client`` and
``self.matcher`` should be instantiated and handled.
"""
def __init__(self, opts):
    """
    Set up the highstate container: merged options, the ordering
    counter, the lazy per-saltenv state listing, and the highstate
    accumulator.
    """
    # Merge minion opts with master opts (see __gen_opts).
    self.opts = self.__gen_opts(opts)
    # Starting index for state auto-ordering.
    self.iorder = 10000
    # Lazily-populated map of saltenv -> available sls modules.
    self.avail = self.__gather_avail()
    self.building_highstate = OrderedDict()
def __gather_avail(self):
    """
    Lazily gather the lists of available sls data from the master
    """
    # LazyAvailStates defers both env discovery and state listing until
    # first access.
    return LazyAvailStates(self)
def __gen_opts(self, opts):
    """
    The options used by the High State object are derived from options
    on the minion and the master, or just the minion if the high state
    call is entirely local.
    """
    # If the state is intended to be applied locally, then the local opts
    # should have all of the needed data, otherwise overwrite the local
    # data items with data from the master
    if "local_state" in opts:
        if opts["local_state"]:
            return opts
    mopts = self.client.master_opts()
    if not isinstance(mopts, dict):
        # An error happened on the master
        # Fall back to safe defaults so the run can still proceed.
        opts["renderer"] = "jinja|yaml"
        opts["failhard"] = False
        opts["state_top"] = salt.utils.url.create("top.sls")
        opts["nodegroups"] = {}
        opts["file_roots"] = {"base": [syspaths.BASE_FILE_ROOTS_DIR]}
    else:
        opts["renderer"] = mopts["renderer"]
        opts["failhard"] = mopts.get("failhard", False)
        # Normalize state_top into a salt:// URL regardless of how the
        # master expressed it.
        if mopts["state_top"].startswith("salt://"):
            opts["state_top"] = mopts["state_top"]
        elif mopts["state_top"].startswith("/"):
            opts["state_top"] = salt.utils.url.create(mopts["state_top"][1:])
        else:
            opts["state_top"] = salt.utils.url.create(mopts["state_top"])
        opts["state_top_saltenv"] = mopts.get("state_top_saltenv", None)
        opts["nodegroups"] = mopts.get("nodegroups", {})
        # Minion-side values act as defaults where the master is silent.
        opts["state_auto_order"] = mopts.get(
            "state_auto_order", opts["state_auto_order"]
        )
        opts["file_roots"] = mopts["file_roots"]
        opts["top_file_merging_strategy"] = mopts.get(
            "top_file_merging_strategy", opts.get("top_file_merging_strategy")
        )
        opts["env_order"] = mopts.get("env_order", opts.get("env_order", []))
        opts["default_top"] = mopts.get("default_top", opts.get("default_top"))
        opts["state_events"] = mopts.get("state_events")
        opts["state_aggregate"] = mopts.get(
            "state_aggregate", opts.get("state_aggregate", False)
        )
        opts["jinja_env"] = mopts.get("jinja_env", {})
        opts["jinja_sls_env"] = mopts.get("jinja_sls_env", {})
        opts["jinja_lstrip_blocks"] = mopts.get("jinja_lstrip_blocks", False)
        opts["jinja_trim_blocks"] = mopts.get("jinja_trim_blocks", False)
    return opts
def _get_envs(self):
"""
Pull the file server environments out of the master options
"""
envs = ["base"]
if "file_roots" in self.opts:
envs.extend([x for x in list(self.opts["file_roots"]) if x not in envs])
env_order = self.opts.get("env_order", [])
# Remove duplicates while preserving the order
members = set()
env_order = [
env for env in env_order if not (env in members or members.add(env))
]
client_envs = self.client.envs()
if env_order and client_envs:
return [env for env in env_order if env in client_envs]
elif env_order:
return env_order
else:
envs.extend([env for env in client_envs if env not in envs])
return envs
def get_tops(self):
    """
    Gather the top files

    Returns a DefaultOrderedDict mapping saltenv -> list of compiled
    top-file data structures, with any ``include`` entries resolved and
    merged in.
    """
    tops = DefaultOrderedDict(list)
    include = DefaultOrderedDict(list)
    done = DefaultOrderedDict(list)
    found = 0  # did we find any contents in the top files?
    # Gather initial top files
    merging_strategy = self.opts["top_file_merging_strategy"]
    if merging_strategy == "same" and not self.opts["saltenv"]:
        if not self.opts["default_top"]:
            raise SaltRenderError(
                "top_file_merging_strategy set to 'same', but no "
                "default_top configuration option was set"
            )
    if self.opts["saltenv"]:
        # A specific saltenv was requested: only cache and compile that
        # environment's top file.
        contents = self.client.cache_file(
            self.opts["state_top"], self.opts["saltenv"]
        )
        if contents:
            found = 1
            tops[self.opts["saltenv"]] = [
                compile_template(
                    contents,
                    self.state.rend,
                    self.state.opts["renderer"],
                    self.state.opts["renderer_blacklist"],
                    self.state.opts["renderer_whitelist"],
                    saltenv=self.opts["saltenv"],
                )
            ]
        else:
            tops[self.opts["saltenv"]] = [{}]
    else:
        # No saltenv pinned: pull the top file from every environment
        # (or just state_top_saltenv when configured).
        found = 0
        state_top_saltenv = self.opts.get("state_top_saltenv", False)
        if state_top_saltenv and not isinstance(state_top_saltenv, str):
            state_top_saltenv = str(state_top_saltenv)
        for saltenv in (
            [state_top_saltenv] if state_top_saltenv else self._get_envs()
        ):
            contents = self.client.cache_file(self.opts["state_top"], saltenv)
            if contents:
                found = found + 1
                tops[saltenv].append(
                    compile_template(
                        contents,
                        self.state.rend,
                        self.state.opts["renderer"],
                        self.state.opts["renderer_blacklist"],
                        self.state.opts["renderer_whitelist"],
                        saltenv=saltenv,
                    )
                )
            else:
                tops[saltenv].append({})
                log.debug("No contents loaded for saltenv '%s'", saltenv)
        if (
            found > 1
            and merging_strategy == "merge"
            and not self.opts.get("env_order", None)
        ):
            log.warning(
                "top_file_merging_strategy is set to '%s' and "
                "multiple top files were found. Merging order is not "
                "deterministic, it may be desirable to either set "
                "top_file_merging_strategy to 'same' or use the "
                "'env_order' configuration parameter to specify the "
                "merging order.",
                merging_strategy,
            )
        if found == 0:
            log.debug(
                "No contents found in top file. If this is not expected, "
                "verify that the 'file_roots' specified in 'etc/master' "
                "are accessible. The 'file_roots' configuration is: %s",
                repr(self.state.opts["file_roots"]),
            )
    # Search initial top files for includes
    for saltenv, ctops in tops.items():
        for ctop in ctops:
            if "include" not in ctop:
                continue
            for sls in ctop["include"]:
                include[saltenv].append(sls)
            ctop.pop("include")
    # Go through the includes and pull out the extra tops and add them
    while include:
        pops = []
        for saltenv, states in include.items():
            pops.append(saltenv)
            if not states:
                continue
            for sls_match in states:
                # Expand glob patterns against the env's available sls.
                for sls in fnmatch.filter(self.avail[saltenv], sls_match):
                    if sls in done[saltenv]:
                        continue
                    tops[saltenv].append(
                        compile_template(
                            self.client.get_state(sls, saltenv).get("dest", False),
                            self.state.rend,
                            self.state.opts["renderer"],
                            self.state.opts["renderer_blacklist"],
                            self.state.opts["renderer_whitelist"],
                            saltenv,
                        )
                    )
                    done[saltenv].append(sls)
        # Drop the processed include entries so the loop terminates.
        for saltenv in pops:
            if saltenv in include:
                include.pop(saltenv)
    return tops
def merge_tops(self, tops):
    """
    Cleanly merge the top files
    """
    strategy = self.opts["top_file_merging_strategy"]
    # Fall back to the default "merge" strategy unless a valid,
    # callable _merge_tops_<strategy> method exists.
    merge_func = self._merge_tops_merge
    try:
        attr_name = "_merge_tops_{}".format(strategy)
        candidate = getattr(self, attr_name)
        if not callable(candidate):
            msg = "'{}' is not callable".format(attr_name)
            log.error(msg)
            raise TypeError(msg)
        merge_func = candidate
    except (AttributeError, TypeError):
        log.warning(
            "Invalid top_file_merging_strategy '%s', falling back to 'merge'",
            strategy,
        )
    return merge_func(tops)
def _merge_tops_merge(self, tops):
    """
    The default merging strategy. The base env is authoritative, so it is
    checked first, followed by the remaining environments. In top files
    from environments other than "base", only the section matching the
    environment from the top file will be considered, and it too will be
    ignored if that environment was defined in the "base" top file.

    Returns a DefaultOrderedDict mapping saltenv -> {target: data}.
    """
    top = DefaultOrderedDict(OrderedDict)
    # Check base env first as it is authoritative
    base_tops = tops.pop("base", DefaultOrderedDict(OrderedDict))
    for ctop in base_tops:
        for saltenv, targets in ctop.items():
            if saltenv == "include":
                continue
            try:
                for tgt in targets:
                    top[saltenv][tgt] = ctop[saltenv][tgt]
            except TypeError:
                # targets was not iterable as a mapping of targets.
                raise SaltRenderError(
                    "Unable to render top file. No targets found."
                )
    for cenv, ctops in tops.items():
        for ctop in ctops:
            for saltenv, targets in ctop.items():
                if saltenv == "include":
                    continue
                elif saltenv != cenv:
                    # Non-base top files only speak for their own env.
                    log.debug(
                        "Section for saltenv '%s' in the '%s' "
                        "saltenv's top file will be ignored, as the "
                        "top_file_merging_strategy is set to 'merge' "
                        "and the saltenvs do not match",
                        saltenv,
                        cenv,
                    )
                    continue
                elif saltenv in top:
                    # base already defined this env; base wins.
                    log.debug(
                        "Section for saltenv '%s' in the '%s' "
                        "saltenv's top file will be ignored, as this "
                        "saltenv was already defined in the 'base' top "
                        "file",
                        saltenv,
                        cenv,
                    )
                    continue
                try:
                    for tgt in targets:
                        top[saltenv][tgt] = ctop[saltenv][tgt]
                except TypeError:
                    raise SaltRenderError(
                        "Unable to render top file. No targets found."
                    )
    return top
def _merge_tops_same(self, tops):
    """
    For each saltenv, only consider the top file from that saltenv. All
    sections matching a given saltenv, which appear in a different
    saltenv's top file, will be ignored.

    Returns a DefaultOrderedDict mapping saltenv -> {target: data}.
    """
    top = DefaultOrderedDict(OrderedDict)
    for cenv, ctops in tops.items():
        if all([x == {} for x in ctops]):
            # No top file found in this env, check the default_top
            default_top = self.opts["default_top"]
            fallback_tops = tops.get(default_top, [])
            if all([x == {} for x in fallback_tops]):
                # Nothing in the fallback top file
                log.error(
                    "The '%s' saltenv has no top file, and the fallback "
                    "saltenv specified by default_top (%s) also has no "
                    "top file",
                    cenv,
                    default_top,
                )
                continue
            for ctop in fallback_tops:
                for saltenv, targets in ctop.items():
                    if saltenv != cenv:
                        continue
                    log.debug(
                        "The '%s' saltenv has no top file, using the "
                        "default_top saltenv (%s)",
                        cenv,
                        default_top,
                    )
                    for tgt in targets:
                        top[saltenv][tgt] = ctop[saltenv][tgt]
                    break
                else:
                    # for/else: no section matching cenv was found in
                    # this fallback top file.
                    log.error(
                        "The '%s' saltenv has no top file, and no "
                        "matches were found in the top file for the "
                        "default_top saltenv (%s)",
                        cenv,
                        default_top,
                    )
                continue
        else:
            for ctop in ctops:
                for saltenv, targets in ctop.items():
                    if saltenv == "include":
                        continue
                    elif saltenv != cenv:
                        # Each env's top file only speaks for itself.
                        log.debug(
                            "Section for saltenv '%s' in the '%s' "
                            "saltenv's top file will be ignored, as the "
                            "top_file_merging_strategy is set to 'same' "
                            "and the saltenvs do not match",
                            saltenv,
                            cenv,
                        )
                        continue
                    try:
                        for tgt in targets:
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                    except TypeError:
                        raise SaltRenderError(
                            "Unable to render top file. No targets found."
                        )
    return top
def _merge_tops_merge_all(self, tops):
    """
    Merge the top files into a single dictionary
    """

    def _split_target(tgt):
        # Separate the matcher option dict (the last one wins, matching
        # the original behavior) from the sls name strings.
        opt = None
        names = []
        for entry in tgt:
            if isinstance(entry, dict):
                opt = entry
            if isinstance(entry, str):
                names.append(entry)
        return opt, names

    top = DefaultOrderedDict(OrderedDict)
    for ctops in tops.values():
        for ctop in ctops:
            for saltenv, targets in ctop.items():
                if saltenv == "include":
                    continue
                try:
                    for tgt in targets:
                        if tgt not in top[saltenv]:
                            # First sighting of this target: take it as-is.
                            top[saltenv][tgt] = ctop[saltenv][tgt]
                            continue
                        # Merge with what we already collected: newer
                        # matcher option wins, sls lists are unioned in
                        # order without duplicates.
                        old_opt, old_states = _split_target(top[saltenv][tgt])
                        new_opt, new_states = _split_target(ctop[saltenv][tgt])
                        combined = []
                        chosen_opt = new_opt or old_opt
                        if chosen_opt is not None:
                            combined.append(chosen_opt)
                        combined.extend(old_states)
                        combined.extend(
                            s for s in new_states if s not in combined
                        )
                        top[saltenv][tgt] = combined
                except TypeError:
                    raise SaltRenderError(
                        "Unable to render top file. No targets found."
                    )
    return top
def verify_tops(self, tops):
    """
    Verify the contents of the top file data

    Walk the merged top data and collect a human-readable error string for
    every structural problem found.

    :param tops: the merged top data (expected to be a dict of
        ``{saltenv: {target: [entries]}}``)
    :return: list of error message strings; empty when the data is well formed
    """
    errors = []
    if not isinstance(tops, dict):
        errors.append("Top data was not formed as a dict")
        # No further checks will work, bail out
        return errors
    for saltenv, matches in tops.items():
        # 'include' is a directive, not a saltenv section
        if saltenv == "include":
            continue
        if not isinstance(saltenv, str):
            errors.append(
                "Environment {} in top file is not formed as a string".format(
                    saltenv
                )
            )
        if saltenv == "":
            errors.append("Empty saltenv statement in top file")
        if not isinstance(matches, dict):
            errors.append(
                "The top file matches for saltenv {} are not "
                "formatted as a dict".format(saltenv)
            )
            # Fix: cannot iterate .values() of a non-dict below; report the
            # error and move on instead of raising AttributeError.
            continue
        for slsmods in matches.values():
            if not isinstance(slsmods, list):
                errors.append(
                    "Malformed topfile (state declarations not formed as a list)"
                )
                continue
            for slsmod in slsmods:
                if isinstance(slsmod, dict):
                    # This value is a match option
                    for val in slsmod.values():
                        if not val:
                            # Fix: the message refers to the saltenv; the
                            # original passed slsmod (the matcher dict) here.
                            errors.append(
                                "Improperly formatted top file matcher "
                                "in saltenv {}: {} file".format(saltenv, val)
                            )
                elif isinstance(slsmod, str):
                    # This is a sls module
                    if not slsmod:
                        errors.append(
                            "Environment {} contains an empty sls index".format(
                                saltenv
                            )
                        )
    return errors
def get_top(self):
    """
    Returns the high data derived from the top file
    """
    try:
        tops = self.get_tops()
    except SaltRenderError as err:
        # A top file that fails to render is not fatal here; log the
        # render error and fall back to an empty result.
        log.error("Unable to render top file: %s", err.error)
        return {}
    return self.merge_tops(tops)
def top_matches(self, top):
    """
    Search through the top high data for matches and return the states
    that this minion needs to execute.

    Returns:
    {'saltenv': ['state1', 'state2', ...]}
    """
    matches = DefaultOrderedDict(OrderedDict)
    # pylint: disable=cell-var-from-loop
    for saltenv, body in top.items():
        # When a specific saltenv is pinned in opts, skip all others
        if self.opts["saltenv"]:
            if saltenv != self.opts["saltenv"]:
                continue
        for match, data in body.items():

            def _filter_matches(_match, _data, _opts):
                # Recursive helper: 'subfilter' entries nest another
                # match -> data mapping beneath this one. The closure
                # deliberately captures the loop variables saltenv/matches.
                if isinstance(_data, str):
                    _data = [_data]
                if self.matchers["confirm_top.confirm_top"](_match, _data, _opts):
                    if saltenv not in matches:
                        matches[saltenv] = []
                    for item in _data:
                        if "subfilter" in item:
                            _tmpdata = item.pop("subfilter")
                            for match, data in _tmpdata.items():
                                _filter_matches(match, data, _opts)
                        if isinstance(item, str):
                            matches[saltenv].append(item)
                        elif isinstance(item, dict):
                            # {saltenv: sls} cross-environment entry; only
                            # honored when the saltenv actually exists
                            env_key, inc_sls = item.popitem()
                            if env_key not in self.avail:
                                continue
                            if env_key not in matches:
                                matches[env_key] = []
                            matches[env_key].append(inc_sls)

            _filter_matches(match, data, self.opts["nodegroups"])
    # Merge in matches supplied by the master_tops system. Ordering
    # depends on master_tops_first; duplicates are dropped.
    ext_matches = self._master_tops()
    for saltenv in ext_matches:
        top_file_matches = matches.get(saltenv, [])
        if self.opts.get("master_tops_first"):
            first = ext_matches[saltenv]
            second = top_file_matches
        else:
            first = top_file_matches
            second = ext_matches[saltenv]
        matches[saltenv] = first + [x for x in second if x not in first]
    # pylint: enable=cell-var-from-loop
    return matches
def _master_tops(self):
    """
    Get results from the master_tops system. Override this function if the
    execution of the master_tops needs customization.
    """
    # Delegate to the fileclient; returns {saltenv: [sls, ...]}-style data
    # as provided by the master_tops subsystem.
    return self.client.master_tops()
def load_dynamic(self, matches):
    """
    If autoload_dynamic_modules is True then automatically load the
    dynamic modules
    """
    if not self.opts["autoload_dynamic_modules"]:
        return
    # Sync custom modules for every matched saltenv; refresh=False defers
    # the module refresh so it is done exactly once, below.
    syncd = self.state.functions["saltutil.sync_all"](list(matches), refresh=False)
    if syncd["grains"]:
        # New grain modules may change grain values, so recompute grains
        # and re-gather pillar before refreshing modules.
        self.opts["grains"] = salt.loader.grains(self.opts)
        self.state.opts["pillar"] = self.state._gather_pillar()
    self.state.module_refresh()
def render_state(self, sls, saltenv, mods, matches, local=False, context=None):
    """
    Render a state file and retrieve all of the include states

    Recursively renders ``sls`` (and everything it includes) into a single
    state dict. ``mods`` accumulates the set of already-rendered
    "saltenv:sls" keys to prevent double-rendering; ``matches`` is the
    top-file match data used to resolve cross-environment includes.

    :return: tuple of (state dict, list of error strings)
    """
    errors = []
    if not local:
        # Fetch (and cache) the sls file from the fileserver
        state_data = self.client.get_state(sls, saltenv)
        fn_ = state_data.get("dest", False)
    else:
        fn_ = sls
        if not os.path.isfile(fn_):
            errors.append(
                "Specified SLS {} on local filesystem cannot be found.".format(sls)
            )
    state = None
    if not fn_:
        errors.append(
            "Specified SLS {} in saltenv {} is not "
            "available on the salt master or through a configured "
            "fileserver".format(sls, saltenv)
        )
    else:
        try:
            state = compile_template(
                fn_,
                self.state.rend,
                self.state.opts["renderer"],
                self.state.opts["renderer_blacklist"],
                self.state.opts["renderer_whitelist"],
                saltenv,
                sls,
                rendered_sls=mods,
                context=context,
            )
        except SaltRenderError as exc:
            msg = "Rendering SLS '{}:{}' failed: {}".format(saltenv, sls, exc)
            log.critical(msg)
            errors.append(msg)
        except Exception as exc:  # pylint: disable=broad-except
            msg = "Rendering SLS {} failed, render error: {}".format(sls, exc)
            log.critical(
                msg,
                # Show the traceback if the debug logging level is enabled
                exc_info_on_loglevel=logging.DEBUG,
            )
            errors.append("{}\n{}".format(msg, traceback.format_exc()))
    # Record this sls as rendered even on failure, so it is not retried.
    # AttributeError is tolerated in case mods is not a set.
    try:
        mods.add("{}:{}".format(saltenv, sls))
    except AttributeError:
        pass
    if state:
        if not isinstance(state, dict):
            errors.append("SLS {} does not render to a dictionary".format(sls))
        else:
            include = []
            if "include" in state:
                if not isinstance(state["include"], list):
                    err = (
                        "Include Declaration in SLS {} is not formed "
                        "as a list".format(sls)
                    )
                    errors.append(err)
                else:
                    include = state.pop("include")
            # Normalize extend/exclude/state declarations in place
            self._handle_extend(state, sls, saltenv, errors)
            self._handle_exclude(state, sls, saltenv, errors)
            self._handle_state_decls(state, sls, saltenv, errors)
            for inc_sls in include:
                # inc_sls may take the form of:
                #   'sls.to.include' <- same as {<saltenv>: 'sls.to.include'}
                #   {<env_key>: 'sls.to.include'}
                #   {'_xenv': 'sls.to.resolve'}
                xenv_key = "_xenv"
                if isinstance(inc_sls, dict):
                    env_key, inc_sls = inc_sls.popitem()
                else:
                    env_key = saltenv
                if env_key not in self.avail:
                    msg = (
                        "Nonexistent saltenv '{}' found in include "
                        "of '{}' within SLS '{}:{}'".format(
                            env_key, inc_sls, saltenv, sls
                        )
                    )
                    log.error(msg)
                    errors.append(msg)
                    continue
                if inc_sls.startswith("."):
                    # Relative include such as '..foo.bar': the leading
                    # dots select how many package levels to strip.
                    match = re.match(r"^(\.+)(.*)$", inc_sls)
                    if match:
                        levels, include = match.groups()
                    else:
                        msg = (
                            "Badly formatted include {} found in include "
                            "in SLS '{}:{}'".format(inc_sls, saltenv, sls)
                        )
                        log.error(msg)
                        errors.append(msg)
                        continue
                    level_count = len(levels)
                    p_comps = sls.split(".")
                    if state_data.get("source", "").endswith("/init.sls"):
                        p_comps.append("init")
                    if level_count > len(p_comps):
                        msg = (
                            "Attempted relative include of '{}' "
                            "within SLS '{}:{}' "
                            "goes beyond top level package ".format(
                                inc_sls, saltenv, sls
                            )
                        )
                        log.error(msg)
                        errors.append(msg)
                        continue
                    inc_sls = ".".join(p_comps[:-level_count] + [include])
                if env_key != xenv_key:
                    if matches is None:
                        matches = []
                    # Resolve inc_sls in the specified environment
                    if env_key in matches or fnmatch.filter(
                        self.avail[env_key], inc_sls
                    ):
                        resolved_envs = [env_key]
                    else:
                        resolved_envs = []
                else:
                    # Resolve inc_sls in the subset of environment matches
                    resolved_envs = [
                        aenv
                        for aenv in matches
                        if fnmatch.filter(self.avail[aenv], inc_sls)
                    ]
                # An include must be resolved to a single environment, or
                # the include must exist in the current environment
                if len(resolved_envs) == 1 or saltenv in resolved_envs:
                    # Match inc_sls against the available states in the
                    # resolved env, matching wildcards in the process. If
                    # there were no matches, then leave inc_sls as the
                    # target so that the next recursion of render_state
                    # will recognize the error.
                    sls_targets = fnmatch.filter(self.avail[saltenv], inc_sls) or [
                        inc_sls
                    ]
                    for sls_target in sls_targets:
                        r_env = (
                            resolved_envs[0] if len(resolved_envs) == 1 else saltenv
                        )
                        mod_tgt = "{}:{}".format(r_env, sls_target)
                        if mod_tgt not in mods:
                            # Recurse into the included sls
                            nstate, err = self.render_state(
                                sls_target, r_env, mods, matches
                            )
                            if nstate:
                                self.merge_included_states(state, nstate, errors)
                                state.update(nstate)
                            if err:
                                errors.extend(err)
                else:
                    msg = ""
                    if not resolved_envs:
                        msg = (
                            "Unknown include: Specified SLS {}: {} is not available"
                            " on the salt master in saltenv(s): {} ".format(
                                env_key,
                                inc_sls,
                                ", ".join(matches)
                                if env_key == xenv_key
                                else env_key,
                            )
                        )
                    elif len(resolved_envs) > 1:
                        msg = (
                            "Ambiguous include: Specified SLS {}: {} is available"
                            " on the salt master in multiple available saltenvs: {}".format(
                                env_key, inc_sls, ", ".join(resolved_envs)
                            )
                        )
                    log.critical(msg)
                    errors.append(msg)
            # Apply automatic ordering; a TypeError here indicates a
            # structurally broken (non-dict) rendered state.
            try:
                self._handle_iorder(state)
            except TypeError:
                log.critical("Could not render SLS %s. Syntax error detected.", sls)
    else:
        state = {}
    return state, errors
def _handle_iorder(self, state):
    """
    Take a state and apply the iorder system

    When state_auto_order is enabled, append an {'order': n} argument to
    every user-defined state declaration that does not already carry one,
    so states run in the order they were declared. ``self.iorder`` is the
    monotonically increasing counter shared across sls files.
    """
    if self.opts["state_auto_order"]:
        for name in state:
            for s_dec in state[name]:
                if not isinstance(s_dec, str):
                    # PyDSL OrderedDict?
                    continue
                if not isinstance(state[name], dict):
                    # Include's or excludes as lists?
                    continue
                if not isinstance(state[name][s_dec], list):
                    # Bad syntax, let the verify seq pick it up later on
                    continue

                found = False
                # Internal keys (e.g. __sls__) are never ordered
                if s_dec.startswith("_"):
                    continue

                for arg in state[name][s_dec]:
                    if isinstance(arg, dict):
                        if len(arg) > 0:
                            if next(iter(arg.keys())) == "order":
                                found = True
                if not found:
                    if not isinstance(state[name][s_dec], list):
                        # quite certainly a syntax error, managed elsewhere
                        continue
                    state[name][s_dec].append({"order": self.iorder})
                    self.iorder += 1
    return state
def _handle_state_decls(self, state, sls, saltenv, errors):
    """
    Add sls and saltenv components to the state

    Also expands dotted short declarations ('pkg.installed' as a key) into
    the long form, and flags IDs that declare the same state module twice.
    Mutates ``state`` in place; problems are appended to ``errors``.
    """
    for name in state:
        if not isinstance(state[name], dict):
            # __extend__/__exclude__ carry lists, not ID declarations
            if name == "__extend__":
                continue
            if name == "__exclude__":
                continue
            if isinstance(state[name], str):
                # Is this is a short state, it needs to be padded
                if "." in state[name]:
                    comps = state[name].split(".")
                    state[name] = {
                        "__sls__": sls,
                        "__env__": saltenv,
                        comps[0]: [comps[1]],
                    }
                    continue
            errors.append("ID {} in SLS {} is not a dictionary".format(name, sls))
            continue
        skeys = set()
        for key in list(state[name]):
            if key.startswith("_"):
                continue
            if not isinstance(state[name][key], list):
                continue
            if "." in key:
                # 'module.function' form: split into module key + function arg
                comps = key.split(".")
                # Salt doesn't support state files such as:
                #
                #     /etc/redis/redis.conf:
                #       file.managed:
                #         - source: salt://redis/redis.conf
                #         - user: redis
                #         - group: redis
                #         - mode: 644
                #       file.comment:
                #           - regex: ^requirepass
                if comps[0] in skeys:
                    errors.append(
                        "ID '{}' in SLS '{}' contains multiple state "
                        "declarations of the same type".format(name, sls)
                    )
                    continue
                state[name][comps[0]] = state[name].pop(key)
                state[name][comps[0]].append(comps[1])
                skeys.add(comps[0])
                continue
            skeys.add(key)
        # Record the origin of each ID for later error reporting
        if "__sls__" not in state[name]:
            state[name]["__sls__"] = sls
        if "__env__" not in state[name]:
            state[name]["__env__"] = saltenv
def _handle_extend(self, state, sls, saltenv, errors):
    """
    Take the extend dec out of state and apply to the highstate global
    dec

    Pops 'extend' from ``state``, normalizes it (origin markers, dotted
    keys), and appends it to state['__extend__']. Problems are appended
    to ``errors``.
    """
    if "extend" in state:
        ext = state.pop("extend")
        if not isinstance(ext, dict):
            errors.append(
                "Extension value in SLS '{}' is not a dictionary".format(sls)
            )
            return
        for name in ext:
            if not isinstance(ext[name], dict):
                errors.append(
                    "Extension name '{}' in SLS '{}' is not a dictionary".format(
                        name, sls
                    )
                )
                continue
            # Record where this extend came from
            if "__sls__" not in ext[name]:
                ext[name]["__sls__"] = sls
            if "__env__" not in ext[name]:
                ext[name]["__env__"] = saltenv
            for key in list(ext[name]):
                if key.startswith("_"):
                    continue
                if not isinstance(ext[name][key], list):
                    continue
                if "." in key:
                    # Expand 'module.function' keys, mirroring
                    # _handle_state_decls
                    comps = key.split(".")
                    ext[name][comps[0]] = ext[name].pop(key)
                    ext[name][comps[0]].append(comps[1])
        state.setdefault("__extend__", []).append(ext)
def _handle_exclude(self, state, sls, saltenv, errors):
    """
    Take the exclude dec out of the state and apply it to the highstate
    global dec

    Pops 'exclude' from ``state`` and extends state['__exclude__'] with
    it. A malformed (non-list) exclude is reported via ``errors`` and
    discarded.
    """
    if "exclude" in state:
        exc = state.pop("exclude")
        if not isinstance(exc, list):
            err = "Exclude Declaration in SLS {} is not formed as a list".format(
                sls
            )
            errors.append(err)
            # Fix: do not merge malformed data. The original extended
            # __exclude__ anyway, so e.g. a string exclude was merged
            # character-by-character.
            return
        state.setdefault("__exclude__", []).extend(exc)
def render_highstate(self, matches, context=None):
    """
    Gather the state files and render them into a single unified salt
    high data structure.

    :param matches: {saltenv: [sls glob, ...]} as produced by top_matches
    :return: tuple of (highstate dict, list of error strings)
    """
    highstate = self.building_highstate
    all_errors = []
    mods = set()
    statefiles = []
    for saltenv, states in matches.items():
        for sls_match in states:
            # Expand the (possibly glob) sls match against the available
            # state files for this saltenv
            if saltenv in self.avail:
                statefiles = fnmatch.filter(self.avail[saltenv], sls_match)
            elif "__env__" in self.avail:
                statefiles = fnmatch.filter(self.avail["__env__"], sls_match)
            else:
                all_errors.append(
                    "No matching salt environment for environment "
                    "'{}' found".format(saltenv)
                )
            # if we did not found any sls in the fileserver listing, this
            # may be because the sls was generated or added later, we can
            # try to directly execute it, and if it fails, anyway it will
            # return the former error
            if not statefiles:
                statefiles = [sls_match]

            for sls in statefiles:
                r_env = "{}:{}".format(saltenv, sls)
                # Already rendered (e.g. pulled in via include) -> skip
                if r_env in mods:
                    continue
                state, errors = self.render_state(
                    sls, saltenv, mods, matches, context=context
                )
                if state:
                    self.merge_included_states(highstate, state, errors)
                # Rewrite the generic "is not available" error into a
                # friendlier "no matching sls" message for the top entry
                for i, error in enumerate(errors[:]):
                    if "is not available" in error:
                        # match SLS foobar in environment
                        this_sls = "SLS {} in saltenv".format(sls_match)
                        if this_sls in error:
                            errors[
                                i
                            ] = "No matching sls found for '{}' in env '{}'".format(
                                sls_match, saltenv
                            )
                all_errors.extend(errors)

    self.clean_duplicate_extends(highstate)
    return highstate, all_errors
def clean_duplicate_extends(self, highstate):
    """Collapse duplicate (id, body) pairs in highstate['__extend__'].

    Each surviving pair is re-wrapped as its own single-entry dict, and
    first-seen order is preserved. No-op when there is no '__extend__'.
    """
    if "__extend__" not in highstate:
        return
    seen = []
    for ext in highstate["__extend__"]:
        for pair in ext.items():
            if pair not in seen:
                seen.append(pair)
    highstate["__extend__"] = [{key: body} for key, body in seen]
def merge_included_states(self, highstate, state, errors):
    """
    Merge an included/rendered state dict into the accumulated highstate.

    '__extend__' and '__exclude__' lists are concatenated (they are not
    globally unique); any other duplicated ID with differing contents is
    reported as a conflict in ``errors``. Mutates both ``highstate`` and
    ``state`` (the special keys are popped from ``state``).
    """
    # The extend members can not be treated as globally unique:
    if "__extend__" in state:
        highstate.setdefault("__extend__", []).extend(state.pop("__extend__"))
    if "__exclude__" in state:
        highstate.setdefault("__exclude__", []).extend(state.pop("__exclude__"))
    for id_ in state:
        if id_ in highstate:
            if highstate[id_] != state[id_]:
                errors.append(
                    "Detected conflicting IDs, SLS"
                    " IDs need to be globally unique.\n    The"
                    " conflicting ID is '{}' and is found in SLS"
                    " '{}:{}' and SLS '{}:{}'".format(
                        id_,
                        highstate[id_]["__env__"],
                        highstate[id_]["__sls__"],
                        state[id_]["__env__"],
                        state[id_]["__sls__"],
                    )
                )
    try:
        highstate.update(state)
    except ValueError:
        errors.append("Error when rendering state with contents: {}".format(state))
def _check_pillar(self, force=False):
    """
    Check the pillar for errors, refuse to run the state if there are
    errors in the pillar and return the pillar errors
    """
    # A forced run skips the pillar sanity check entirely.
    if force:
        return True
    # The pillar compiler records failures under the '_errors' key;
    # its presence means the pillar is unusable.
    return "_errors" not in self.state.opts["pillar"]
def matches_whitelist(self, matches, whitelist):
    """
    Reads over the matches and returns a matches dict with just the ones
    that are in the whitelist

    ``whitelist`` may be a list of sls names or a comma-delimited string;
    a falsy whitelist returns ``matches`` unchanged. Saltenvs that end up
    with no whitelisted sls are omitted from the result.
    """
    if not whitelist:
        return matches
    if not isinstance(whitelist, list):
        whitelist = whitelist.split(",")
    ret_matches = {}
    for env, slses in matches.items():
        kept = [sls for sls in slses if sls in whitelist]
        if kept:
            ret_matches[env] = kept
    return ret_matches
def call_highstate(
    self,
    exclude=None,
    cache=None,
    cache_name="highstate",
    force=False,
    whitelist=None,
    orchestration_jid=None,
):
    """
    Run the sequence to execute the salt highstate for this minion

    :param exclude: sls names (list or comma string) to add to __exclude__
    :param cache: when truthy, execute a previously cached highstate
        instead of recompiling
    :param cache_name: basename of the cache file under opts['cachedir']
    :param force: skip the pillar error check
    :param whitelist: restrict the top matches to these sls names
    :return: the call_high result dict, a list of error strings, or the
        "no states" placeholder dict
    """
    # Check that top file exists
    tag_name = "no_|-states_|-states_|-None"
    ret = {
        tag_name: {
            "result": False,
            "comment": "No states found for this minion",
            "name": "No States",
            "changes": {},
            "__run_num__": 0,
        }
    }
    cfn = os.path.join(self.opts["cachedir"], "{}.cache.p".format(cache_name))
    if cache:
        if os.path.isfile(cfn):
            # Short-circuit: run the cached high data directly
            with salt.utils.files.fopen(cfn, "rb") as fp_:
                high = salt.payload.load(fp_)
                return self.state.call_high(high, orchestration_jid)
    # File exists so continue
    err = []
    try:
        top = self.get_top()
    except SaltRenderError as err:
        # NOTE(review): this rebinds the `err` list above; harmless only
        # because this branch returns immediately.
        ret[tag_name]["comment"] = "Unable to render top file: "
        ret[tag_name]["comment"] += str(err.error)
        return ret
    except Exception:  # pylint: disable=broad-except
        trb = traceback.format_exc()
        err.append(trb)
        return err
    err += self.verify_tops(top)
    matches = self.top_matches(top)
    if not matches:
        msg = (
            "No Top file or master_tops data matches found. Please see "
            "master log for details."
        )
        ret[tag_name]["comment"] = msg
        return ret
    matches = self.matches_whitelist(matches, whitelist)
    self.load_dynamic(matches)
    if not self._check_pillar(force):
        err += ["Pillar failed to render with the following messages:"]
        err += self.state.opts["pillar"]["_errors"]
    else:
        high, errors = self.render_highstate(matches)
        if exclude:
            if isinstance(exclude, str):
                exclude = exclude.split(",")
            if "__exclude__" in high:
                high["__exclude__"].extend(exclude)
            else:
                high["__exclude__"] = exclude
        err += errors
    if err:
        return err
    if not high:
        return ret
    # Cache the compiled high data with restrictive permissions so a
    # later `cache=True` run can reuse it.
    with salt.utils.files.set_umask(0o077):
        try:
            if salt.utils.platform.is_windows():
                # Make sure cache file isn't read-only
                self.state.functions["cmd.run"](
                    ["attrib", "-R", cfn],
                    python_shell=False,
                    output_loglevel="quiet",
                )
            with salt.utils.files.fopen(cfn, "w+b") as fp_:
                try:
                    salt.payload.dump(high, fp_)
                except TypeError:
                    # Can't serialize pydsl
                    pass
        except OSError:
            log.error('Unable to write to "state.highstate" cache file %s', cfn)
    return self.state.call_high(high, orchestration_jid)
def compile_highstate(self):
    """
    Return just the highstate or the errors

    Renders the top file and all matched sls files but does not execute
    anything. Returns the high data dict on success, or a list of error
    strings if any verification/render errors occurred.
    """
    err = []
    top = self.get_top()
    err += self.verify_tops(top)
    matches = self.top_matches(top)
    high, errors = self.render_highstate(matches)
    err += errors

    if err:
        return err

    return high
def compile_low_chunks(self):
    """
    Compile the highstate but don't run it, return the low chunks to
    see exactly what the highstate will execute

    Returns the list of low chunks on success, or a list of error strings
    if compilation/verification failed.
    """
    top = self.get_top()
    matches = self.top_matches(top)
    high, errors = self.render_highstate(matches)

    # If there is extension data reconcile it
    high, ext_errors = self.state.reconcile_extend(high)
    errors += ext_errors

    # Verify that the high data is structurally sound
    errors += self.state.verify_high(high)
    # Expand require_in/watch_in/etc. into direct requisites
    high, req_in_errors = self.state.requisite_in(high)
    errors += req_in_errors
    high = self.state.apply_exclude(high)

    if errors:
        return errors

    # Compile and verify the raw chunks
    chunks = self.state.compile_high_data(high)
    return chunks
def compile_state_usage(self):
    """
    Return all used and unused states for the minion based on the top match data

    For every saltenv in ``self.avail``, classifies each available state
    as used (matched by the top data) or unused, with counts.

    :return: {saltenv: {'used': [...], 'unused': [...], 'count_all': int,
        'count_used': int, 'count_unused': int}} or a list of top-file
        verification errors
    """
    err = []
    top = self.get_top()
    err += self.verify_tops(top)

    if err:
        return err

    matches = self.top_matches(top)
    state_usage = {}

    for saltenv, states in self.avail.items():
        env_usage = {
            "used": [],
            "unused": [],
            "count_all": 0,
            "count_used": 0,
            "count_unused": 0,
        }

        # Fix: default to an empty list so saltenvs with no top-file
        # matches are still reported (every state counted as unused)
        # instead of raising TypeError on `state in None` below.
        env_matches = matches.get(saltenv, [])

        for state in states:
            env_usage["count_all"] += 1
            if state in env_matches:
                env_usage["count_used"] += 1
                env_usage["used"].append(state)
            else:
                env_usage["count_unused"] += 1
                env_usage["unused"].append(state)

        state_usage[saltenv] = env_usage

    return state_usage
def destroy(self):
    # BaseHighState itself owns no resources; concrete subclasses
    # (e.g. HighState) override this to tear down their fileclient.
    raise NotImplementedError

def __enter__(self):
    # Support use as a context manager
    return self

def __exit__(self, *_):
    # Always release resources on context exit, regardless of exceptions
    self.destroy()
class HighState(BaseHighState):
    """
    Generate and execute the salt "High State". The High State is the
    compound state derived from a group of template files stored on the
    salt master or in the local cache.
    """

    # a stack of active HighState objects during a state.highstate run
    stack = []

    def __init__(
        self,
        opts,
        pillar_override=None,
        jid=None,
        pillar_enc=None,
        proxy=None,
        context=None,
        mocked=False,
        loader="states",
        initial_pillar=None,
    ):
        self.opts = opts
        # File client used to pull top/sls files from the master or cache
        self.client = salt.fileclient.get_file_client(self.opts)
        BaseHighState.__init__(self, opts)
        # The State object that actually executes the compiled chunks
        self.state = State(
            self.opts,
            pillar_override,
            jid,
            pillar_enc,
            proxy=proxy,
            context=context,
            mocked=mocked,
            loader=loader,
            initial_pillar=initial_pillar,
        )
        self.matchers = salt.loader.matchers(self.opts)
        self.proxy = proxy

        # tracks all pydsl state declarations globally across sls files
        self._pydsl_all_decls = {}

        # a stack of current rendering Sls objects, maintained and used by the pydsl renderer.
        self._pydsl_render_stack = []

    def push_active(self):
        # Note: mutates the class-level stack shared by all instances
        self.stack.append(self)

    @classmethod
    def clear_active(cls):
        # Nuclear option
        #
        # Blow away the entire stack. Used primarily by the test runner but also
        # useful in custom wrappers of the HighState class, to reset the stack
        # to a fresh state.
        cls.stack = []

    @classmethod
    def pop_active(cls):
        cls.stack.pop()

    @classmethod
    def get_active(cls):
        # Return the most recently pushed HighState, or None if empty
        try:
            return cls.stack[-1]
        except IndexError:
            return None

    def destroy(self):
        # Release the fileclient's resources
        self.client.destroy()

    def __enter__(self):
        return self

    def __exit__(self, *_):
        self.destroy()
class MasterState(State):
    """
    Create a State object for master side compiling
    """

    def __init__(self, opts, minion):
        # NOTE(review): `minion` is accepted but unused here; presumably
        # kept for call-site compatibility — confirm against callers.
        State.__init__(self, opts)

    def load_modules(self, data=None, proxy=None):
        """
        Load the modules into the state
        """
        log.info("Loading fresh modules for state activity")
        # Load a modified client interface that looks like the interface used
        # from the minion, but uses remote execution
        #
        self.functions = salt.client.FunctionWrapper(self.opts, self.opts["id"])
        # Load the states, but they should not be used in this class apart
        # from inspection
        self.utils = salt.loader.utils(self.opts)
        self.serializers = salt.loader.serializers(self.opts)
        self.states = salt.loader.states(
            self.opts, self.functions, self.utils, self.serializers
        )
        self.rend = salt.loader.render(
            self.opts, self.functions, states=self.states, context=self.state_con
        )
class MasterHighState(HighState):
    """
    Execute highstate compilation from the master
    """

    def __init__(self, master_opts, minion_opts, grains, id_, saltenv=None):
        # Force the fileclient to be local
        opts = copy.deepcopy(minion_opts)
        opts["file_client"] = "local"
        opts["file_roots"] = master_opts["master_roots"]
        opts["renderer"] = master_opts["renderer"]
        opts["state_top"] = master_opts["state_top"]
        opts["id"] = id_
        opts["grains"] = grains
        # NOTE(review): the `saltenv` parameter is accepted but never
        # applied to opts here — confirm whether that is intentional.
        HighState.__init__(self, opts)
class RemoteHighState:
    """
    Manage gathering the data from the master
    """

    # XXX: This class doesn't seem to be used anywhere
    def __init__(self, opts, grains):
        self.opts = opts
        self.grains = grains
        # self.auth = salt.crypt.SAuth(opts)
        # Request channel to the master for the _master_state command
        self.channel = salt.transport.client.ReqChannel.factory(self.opts["master_uri"])
        self._closing = False

    def compile_master(self):
        """
        Return the state data from the master

        Returns an empty dict if the request times out.
        """
        load = {"grains": self.grains, "opts": self.opts, "cmd": "_master_state"}
        try:
            # Long timeout: master-side compilation can be slow
            return self.channel.send(load, tries=3, timeout=72000)
        except SaltReqTimeoutError:
            return {}

    def destroy(self):
        # Idempotent: safe to call more than once (also via __del__)
        if self._closing:
            return

        self._closing = True
        self.channel.close()

    # pylint: disable=W1701
    def __del__(self):
        self.destroy()

    # pylint: enable=W1701
|
test_flight.py | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import threading
import pytest
import pyarrow as pa
flight = pytest.importorskip("pyarrow.flight")
class ConstantFlightServer(flight.FlightServerBase):
    """A Flight server that always returns the same data.

    See ARROW-4796: this server implementation will segfault if Flight
    does not properly hold a reference to the Table object.
    """

    def do_get(self, ticket):
        # The ticket is ignored; every request gets the same small table.
        data = [
            pa.array([-10, -5, 0, 5, 10])
        ]
        table = pa.Table.from_arrays(data, names=['a'])
        return flight.RecordBatchStream(table)
@contextlib.contextmanager
def flight_server(server_base, *args, **kwargs):
    """Spawn a Flight server on a free port, shutting it down when done.

    ``server_base`` is instantiated with ``*args``/``**kwargs``; its
    ``run(port)`` method is invoked on a daemon thread and ``shutdown()``
    is always called on exit. Yields the chosen port number.
    """
    # Find a free port by binding an ephemeral probe socket.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    with contextlib.closing(sock) as sock:
        # Fix: SO_REUSEADDR must be set *before* bind() to have any
        # effect; it also lets the server re-bind the port right after
        # the probe socket is closed.
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('', 0))
        port = sock.getsockname()[1]
    # Probe socket is now closed, so the server can bind the port.
    server_instance = server_base(*args, **kwargs)

    def _server_thread():
        server_instance.run(port)

    thread = threading.Thread(target=_server_thread, daemon=True)
    thread.start()
    try:
        yield port
    finally:
        # Fix: shut the server down even if the caller's block raises,
        # so the daemon thread never lingers holding the port.
        server_instance.shutdown()
        thread.join()
def test_flight_do_get():
    """Try a simple do_get call."""
    data = [
        pa.array([-10, -5, 0, 5, 10])
    ]
    table = pa.Table.from_arrays(data, names=['a'])

    with flight_server(ConstantFlightServer) as server_port:
        client = flight.FlightClient.connect('localhost', server_port)
        # NOTE(review): do_get is passed the expected schema here, which
        # matches an early Flight API — confirm against the pyarrow
        # version pinned by this repo.
        data = client.do_get(flight.Ticket(b''), table.schema).read_all()
        assert data.equals(table)
|
test_sys.py | import unittest, test.support
from test.support.script_helper import assert_python_ok, assert_python_failure
import sys, io, os
import struct
import subprocess
import textwrap
import warnings
import operator
import codecs
import gc
import sysconfig
import locale
# count the number of test runs, used to create unique
# strings to intern in test_intern()
numruns = 0
class SysModuleTest(unittest.TestCase):
def setUp(self):
    # Save the original stream/hook objects so tearDown can restore them
    # even when a test replaces or deletes them.
    self.orig_stdout = sys.stdout
    self.orig_stderr = sys.stderr
    self.orig_displayhook = sys.displayhook

def tearDown(self):
    sys.stdout = self.orig_stdout
    sys.stderr = self.orig_stderr
    sys.displayhook = self.orig_displayhook
    # Reap any subprocesses spawned by assert_python_* helpers
    test.support.reap_children()
def test_original_displayhook(self):
    import builtins
    out = io.StringIO()
    sys.stdout = out

    dh = sys.__displayhook__

    # The displayhook requires exactly one argument
    self.assertRaises(TypeError, dh)
    if hasattr(builtins, "_"):
        del builtins._

    # None must print nothing and must not set builtins._
    dh(None)
    self.assertEqual(out.getvalue(), "")
    self.assertTrue(not hasattr(builtins, "_"))
    # Any other value is printed with repr + newline and bound to builtins._
    dh(42)
    self.assertEqual(out.getvalue(), "42\n")
    self.assertEqual(builtins._, 42)

    # With sys.stdout missing, the hook must raise RuntimeError
    del sys.stdout
    self.assertRaises(RuntimeError, dh, 42)

def test_lost_displayhook(self):
    del sys.displayhook
    code = compile("42", "<string>", "single")
    # Interactive evaluation with no displayhook must fail
    self.assertRaises(RuntimeError, eval, code)

def test_custom_displayhook(self):
    def baddisplayhook(obj):
        raise ValueError

    sys.displayhook = baddisplayhook
    code = compile("42", "<string>", "single")
    # An exception raised by the custom hook propagates to the caller
    self.assertRaises(ValueError, eval, code)
def test_original_excepthook(self):
    err = io.StringIO()
    sys.stderr = err

    eh = sys.__excepthook__

    # The excepthook requires the (type, value, traceback) triple
    self.assertRaises(TypeError, eh)
    try:
        raise ValueError(42)
    except ValueError as exc:
        # The live exc_info triple is fed to the hook
        eh(*sys.exc_info())

    self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
def test_excepthook(self):
    """sys.excepthook must report a TypeError when ``value`` is not an exception."""
    with test.support.captured_output("stderr") as stderr:
        # Pass a str where an exception instance is expected
        sys.excepthook(1, '1', 1)
    # assertIn reports the haystack on failure, unlike assertTrue(x in y)
    self.assertIn("TypeError: print_exception(): Exception expected for "
                  "value, str found", stderr.getvalue())

# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exit(self):
    # call with two arguments
    self.assertRaises(TypeError, sys.exit, 42, 42)

    # call without argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit()
    self.assertIsNone(cm.exception.code)

    rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
    self.assertEqual(rc, 0)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    # call with integer argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit(42)
    self.assertEqual(cm.exception.code, 42)

    # call with tuple argument with one entry
    # entry will be unpacked
    with self.assertRaises(SystemExit) as cm:
        sys.exit((42,))
    self.assertEqual(cm.exception.code, 42)

    # call with string argument
    with self.assertRaises(SystemExit) as cm:
        sys.exit("exit")
    self.assertEqual(cm.exception.code, "exit")

    # call with tuple argument with two entries
    with self.assertRaises(SystemExit) as cm:
        sys.exit((17, 23))
    self.assertEqual(cm.exception.code, (17, 23))

    # test that the exit machinery handles SystemExits properly
    rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
    self.assertEqual(rc, 47)
    self.assertEqual(out, b'')
    self.assertEqual(err, b'')

    def check_exit_message(code, expected, **env_vars):
        # Run `code` in a subprocess and verify stderr starts with
        # the expected exit message bytes
        rc, out, err = assert_python_failure('-c', code, **env_vars)
        self.assertEqual(rc, 1)
        self.assertEqual(out, b'')
        self.assertTrue(err.startswith(expected),
            "%s doesn't start with %s" % (ascii(err), ascii(expected)))

    # test that stderr buffer is flushed before the exit message is written
    # into stderr
    check_exit_message(
        r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
        b"unflushed,message")

    # test that the exit message is written with backslashreplace error
    # handler to stderr
    check_exit_message(
        r'import sys; sys.exit("surrogates:\uDCFF")',
        b"surrogates:\\udcff")

    # test that the unicode message is encoded to the stderr encoding
    # instead of the default encoding (utf8)
    check_exit_message(
        r'import sys; sys.exit("h\xe9")',
        b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
    # getdefaultencoding takes no arguments
    self.assertRaises(TypeError, sys.getdefaultencoding, 42)
    # can't check more than the type, as the user might have changed it
    self.assertIsInstance(sys.getdefaultencoding(), str)

# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py

def test_switchinterval(self):
    self.assertRaises(TypeError, sys.setswitchinterval)
    self.assertRaises(TypeError, sys.setswitchinterval, "a")
    # The interval must be strictly positive
    self.assertRaises(ValueError, sys.setswitchinterval, -1.0)
    self.assertRaises(ValueError, sys.setswitchinterval, 0.0)
    orig = sys.getswitchinterval()
    # sanity check
    self.assertTrue(orig < 0.5, orig)
    try:
        for n in 0.00001, 0.05, 3.0, orig:
            sys.setswitchinterval(n)
            self.assertAlmostEqual(sys.getswitchinterval(), n)
    finally:
        # Always restore the original interval
        sys.setswitchinterval(orig)
def test_recursionlimit(self):
    # getrecursionlimit takes no arguments
    self.assertRaises(TypeError, sys.getrecursionlimit, 42)
    oldlimit = sys.getrecursionlimit()
    self.assertRaises(TypeError, sys.setrecursionlimit)
    self.assertRaises(ValueError, sys.setrecursionlimit, -42)
    sys.setrecursionlimit(10000)
    self.assertEqual(sys.getrecursionlimit(), 10000)
    sys.setrecursionlimit(oldlimit)

def test_recursionlimit_recovery(self):
    if hasattr(sys, 'gettrace') and sys.gettrace():
        self.skipTest('fatal error if run with a trace function')

    oldlimit = sys.getrecursionlimit()
    def f():
        # Infinite recursion to trigger RecursionError
        f()
    try:
        for depth in (10, 25, 50, 75, 100, 250, 1000):
            try:
                sys.setrecursionlimit(depth)
            except RecursionError:
                # Issue #25274: The recursion limit is too low at the
                # current recursion depth
                continue

            # Issue #5392: test stack overflow after hitting recursion
            # limit twice
            self.assertRaises(RecursionError, f)
            self.assertRaises(RecursionError, f)
    finally:
        sys.setrecursionlimit(oldlimit)
@test.support.cpython_only
def test_setrecursionlimit_recursion_depth(self):
    # Issue #25274: Setting a low recursion limit must be blocked if the
    # current recursion depth is already higher than the "lower-water
    # mark". Otherwise, it may not be possible anymore to
    # reset the overflowed flag to 0.
    from _testcapi import get_recursion_depth

    def set_recursion_limit_at_depth(depth, limit):
        # Recurse until the requested depth is reached, then verify
        # that lowering the limit below that depth is refused
        recursion_depth = get_recursion_depth()
        if recursion_depth >= depth:
            with self.assertRaises(RecursionError) as cm:
                sys.setrecursionlimit(limit)
            self.assertRegex(str(cm.exception),
                "cannot set the recursion limit to [0-9]+ "
                "at the recursion depth [0-9]+: "
                "the limit is too low")
        else:
            set_recursion_limit_at_depth(depth, limit)

    oldlimit = sys.getrecursionlimit()
    try:
        sys.setrecursionlimit(1000)

        for limit in (10, 25, 50, 75, 100, 150, 200):
            # formula extracted from _Py_RecursionLimitLowerWaterMark()
            # NOTE(review): all limits tested are <= 200, so the first
            # branch is currently unexercised.
            if limit > 200:
                depth = limit - 50
            else:
                depth = limit * 3 // 4
            set_recursion_limit_at_depth(depth, limit)
    finally:
        sys.setrecursionlimit(oldlimit)
def test_recursionlimit_fatalerror(self):
    # A fatal error occurs if a second recursion limit is hit when recovering
    # from a first one.
    code = textwrap.dedent("""
        import sys

        def f():
            try:
                f()
            except RecursionError:
                f()

        sys.setrecursionlimit(%d)
        f()""")
    with test.support.SuppressCrashReport():
        for i in (50, 1000):
            # Run in a subprocess: the interpreter is expected to abort
            sub = subprocess.Popen([sys.executable, '-c', code % i],
                stderr=subprocess.PIPE)
            err = sub.communicate()[1]
            self.assertTrue(sub.returncode, sub.returncode)
            self.assertIn(
                b"Fatal Python error: Cannot recover from stack overflow",
                err)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
def test_call_tracing(self):
self.assertRaises(TypeError, sys.call_tracing, type, 2)
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
    @test.support.refcount_test
    def test_refcount(self):
        """sys.getrefcount(None) rises by one while a global holds an extra
        reference to None and falls back after the global is deleted."""
        # n here must be a global in order for this test to pass while
        # tracing with a python function. Tracing calls PyFrame_FastToLocals
        # which will add a copy of any locals to the frame object, causing
        # the reference count to increase by 2 instead of 1.
        global n
        self.assertRaises(TypeError, sys.getrefcount)
        c = sys.getrefcount(None)
        n = None
        self.assertEqual(sys.getrefcount(None), c+1)
        del n
        self.assertEqual(sys.getrefcount(None), c)
        if hasattr(sys, "gettotalrefcount"):
            # gettotalrefcount() only exists on debug builds.
            self.assertIsInstance(sys.gettotalrefcount(), int)
    def test_getframe(self):
        """sys._getframe() returns the currently executing frame; its code
        object must be this very test method's code object."""
        self.assertRaises(TypeError, sys._getframe, 42, 42)
        self.assertRaises(ValueError, sys._getframe, 2000000000)
        self.assertTrue(
            SysModuleTest.test_getframe.__code__ \
            is sys._getframe().f_code
        )
    # sys._current_frames() is a CPython-only gimmick.
    @test.support.reap_threads
    def test_current_frames(self):
        """sys._current_frames() maps each running thread id to that
        thread's topmost frame; verified for the main thread and for a
        worker thread blocked at a known source line."""
        import threading
        import traceback
        # Spawn a thread that blocks at a known place. Then the main
        # thread does sys._current_frames(), and verifies that the frames
        # returned make sense.
        entered_g = threading.Event()
        leave_g = threading.Event()
        thread_info = []  # the thread's id
        def f123():
            g456()
        def g456():
            thread_info.append(threading.get_ident())
            entered_g.set()
            leave_g.wait()
        t = threading.Thread(target=f123)
        t.start()
        entered_g.wait()
        # At this point, t has finished its entered_g.set(), although it's
        # impossible to guess whether it's still on that line or has moved on
        # to its leave_g.wait().
        self.assertEqual(len(thread_info), 1)
        thread_id = thread_info[0]
        d = sys._current_frames()
        for tid in d:
            self.assertIsInstance(tid, int)
            self.assertGreater(tid, 0)
        main_id = threading.get_ident()
        self.assertIn(main_id, d)
        self.assertIn(thread_id, d)
        # Verify that the captured main-thread frame is _this_ frame.
        frame = d.pop(main_id)
        self.assertTrue(frame is sys._getframe())
        # Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
        # threading.py are also in the thread's call stack.
        frame = d.pop(thread_id)
        stack = traceback.extract_stack(frame)
        for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
            if funcname == "f123":
                break
        else:
            self.fail("didn't find f123() on thread's call stack")
        self.assertEqual(sourceline, "g456()")
        # And the next record must be for g456().
        filename, lineno, funcname, sourceline = stack[i+1]
        self.assertEqual(funcname, "g456")
        self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
        # Reap the spawned thread.
        leave_g.set()
        t.join()
    def test_attributes(self):
        """Smoke-test the simple data attributes of sys: types, lengths and
        a few invariants (float_info, int_info, hash_info, version_info,
        maxunicode, float_repr_style, ...)."""
        self.assertIsInstance(sys.api_version, int)
        self.assertIsInstance(sys.argv, list)
        self.assertIn(sys.byteorder, ("little", "big"))
        self.assertIsInstance(sys.builtin_module_names, tuple)
        self.assertIsInstance(sys.copyright, str)
        self.assertIsInstance(sys.exec_prefix, str)
        self.assertIsInstance(sys.base_exec_prefix, str)
        self.assertIsInstance(sys.executable, str)
        self.assertEqual(len(sys.float_info), 11)
        self.assertEqual(sys.float_info.radix, 2)
        self.assertEqual(len(sys.int_info), 2)
        self.assertTrue(sys.int_info.bits_per_digit % 5 == 0)
        self.assertTrue(sys.int_info.sizeof_digit >= 1)
        self.assertEqual(type(sys.int_info.bits_per_digit), int)
        self.assertEqual(type(sys.int_info.sizeof_digit), int)
        self.assertIsInstance(sys.hexversion, int)
        self.assertEqual(len(sys.hash_info), 9)
        self.assertLess(sys.hash_info.modulus, 2**sys.hash_info.width)
        # sys.hash_info.modulus should be a prime; we do a quick
        # probable primality test (doesn't exclude the possibility of
        # a Carmichael number)
        for x in range(1, 100):
            self.assertEqual(
                pow(x, sys.hash_info.modulus-1, sys.hash_info.modulus),
                1,
                "sys.hash_info.modulus {} is a non-prime".format(
                    sys.hash_info.modulus)
                )
        self.assertIsInstance(sys.hash_info.inf, int)
        self.assertIsInstance(sys.hash_info.nan, int)
        self.assertIsInstance(sys.hash_info.imag, int)
        # Cross-check the reported hash algorithm against the build config.
        algo = sysconfig.get_config_var("Py_HASH_ALGORITHM")
        if sys.hash_info.algorithm in {"fnv", "siphash24"}:
            self.assertIn(sys.hash_info.hash_bits, {32, 64})
            self.assertIn(sys.hash_info.seed_bits, {32, 64, 128})
            if algo == 1:
                self.assertEqual(sys.hash_info.algorithm, "siphash24")
            elif algo == 2:
                self.assertEqual(sys.hash_info.algorithm, "fnv")
            else:
                self.assertIn(sys.hash_info.algorithm, {"fnv", "siphash24"})
        else:
            # PY_HASH_EXTERNAL
            self.assertEqual(algo, 0)
        self.assertGreaterEqual(sys.hash_info.cutoff, 0)
        self.assertLess(sys.hash_info.cutoff, 8)
        self.assertIsInstance(sys.maxsize, int)
        self.assertIsInstance(sys.maxunicode, int)
        self.assertEqual(sys.maxunicode, 0x10FFFF)
        self.assertIsInstance(sys.platform, str)
        self.assertIsInstance(sys.prefix, str)
        self.assertIsInstance(sys.base_prefix, str)
        self.assertIsInstance(sys.version, str)
        vi = sys.version_info
        self.assertIsInstance(vi[:], tuple)
        self.assertEqual(len(vi), 5)
        self.assertIsInstance(vi[0], int)
        self.assertIsInstance(vi[1], int)
        self.assertIsInstance(vi[2], int)
        self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi[4], int)
        self.assertIsInstance(vi.major, int)
        self.assertIsInstance(vi.minor, int)
        self.assertIsInstance(vi.micro, int)
        self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
        self.assertIsInstance(vi.serial, int)
        self.assertEqual(vi[0], vi.major)
        self.assertEqual(vi[1], vi.minor)
        self.assertEqual(vi[2], vi.micro)
        self.assertEqual(vi[3], vi.releaselevel)
        self.assertEqual(vi[4], vi.serial)
        self.assertTrue(vi > (1,0,0))
        self.assertIsInstance(sys.float_repr_style, str)
        self.assertIn(sys.float_repr_style, ('short', 'legacy'))
        if not sys.platform.startswith('win'):
            # abiflags only exists on POSIX builds.
            self.assertIsInstance(sys.abiflags, str)
def test_thread_info(self):
info = sys.thread_info
self.assertEqual(len(info), 3)
self.assertIn(info.name, ('nt', 'pthread', 'solaris', None))
self.assertIn(info.lock, ('semaphore', 'mutex+cond', None))
def test_43581(self):
# Can't use sys.stdout, as this is a StringIO object when
# the test runs under regrtest.
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_intern(self):
global numruns
numruns += 1
self.assertRaises(TypeError, sys.intern)
s = "never interned before" + str(numruns)
self.assertTrue(sys.intern(s) is s)
s2 = s.swapcase().swapcase()
self.assertTrue(sys.intern(s2) is s)
# Subclasses of string can't be interned, because they
# provide too much opportunity for insane things to happen.
# We don't want them in the interned dict and if they aren't
# actually interned, we don't want to create the appearance
# that they are by allowing intern() to succeed.
class S(str):
def __hash__(self):
return 123
self.assertRaises(TypeError, sys.intern, S("abc"))
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int
self.assertEqual(type(getattr(sys.flags, attr)), attr_type, attr)
self.assertTrue(repr(sys.flags))
self.assertEqual(len(sys.flags), len(attrs))
self.assertIn(sys.flags.utf8_mode, {0, 1, 2})
def assert_raise_on_new_sys_type(self, sys_attr):
# Users are intentionally prevented from creating new instances of
# sys.flags, sys.version_info, and sys.getwindowsversion.
attr_type = type(sys_attr)
with self.assertRaises(TypeError):
attr_type()
with self.assertRaises(TypeError):
attr_type.__new__(attr_type)
    def test_sys_flags_no_instantiation(self):
        """type(sys.flags) must not be instantiable from user code."""
        self.assert_raise_on_new_sys_type(sys.flags)
    def test_sys_version_info_no_instantiation(self):
        """type(sys.version_info) must not be instantiable from user code."""
        self.assert_raise_on_new_sys_type(sys.version_info)
    def test_sys_getwindowsversion_no_instantiation(self):
        """type(sys.getwindowsversion()) must not be instantiable (Windows)."""
        # Skip if not being run on Windows.
        test.support.get_attribute(sys, "getwindowsversion")
        self.assert_raise_on_new_sys_type(sys.getwindowsversion())
    @test.support.cpython_only
    def test_clear_type_cache(self):
        """sys._clear_type_cache() runs without error (CPython-only)."""
        sys._clear_type_cache()
def test_ioencoding(self):
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
expected = ("\xa2" + os.linesep).encode("cp424")
self.assertEqual(out, expected)
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'?')
env["PYTHONIOENCODING"] = "ascii"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = "ascii:"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xa2))'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
env=env)
out, err = p.communicate()
self.assertEqual(out, b'')
self.assertIn(b'UnicodeEncodeError:', err)
self.assertIn(rb"'\xa2'", err)
env["PYTHONIOENCODING"] = ":surrogateescape"
p = subprocess.Popen([sys.executable, "-c", 'print(chr(0xdcbd))'],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, b'\xbd')
@unittest.skipUnless(test.support.FS_NONASCII,
'requires OS support of non-ASCII encodings')
@unittest.skipUnless(sys.getfilesystemencoding() == locale.getpreferredencoding(False),
'requires FS encoding to match locale')
def test_ioencoding_nonascii(self):
env = dict(os.environ)
env["PYTHONIOENCODING"] = ""
p = subprocess.Popen([sys.executable, "-c",
'print(%a)' % test.support.FS_NONASCII],
stdout=subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, os.fsencode(test.support.FS_NONASCII))
    @unittest.skipIf(sys.base_prefix != sys.prefix,
                     'Test is not venv-compatible')
    def test_executable(self):
        """sys.executable must be absolute; a child started with a bogus
        argv[0] reports either the real executable or an empty string."""
        # sys.executable should be absolute
        self.assertEqual(os.path.abspath(sys.executable), sys.executable)
        # Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non existent program name and Python is unable to
        # retrieve the real program name
        # For a normal installation, it should work without 'cwd'
        # argument. For test runs in the build directory, see #7774.
        python_dir = os.path.dirname(os.path.realpath(sys.executable))
        p = subprocess.Popen(
            ["nonexistent", "-c",
             'import sys; print(sys.executable.encode("ascii", "backslashreplace"))'],
            executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
        stdout = p.communicate()[0]
        executable = stdout.strip().decode("ASCII")
        p.wait()
        # Either the platform could not recover the name (b'') or it did.
        self.assertIn(executable, ["b''", repr(sys.executable.encode("ascii", "backslashreplace"))])
    def check_fsencoding(self, fs_encoding, expected=None):
        """Assert *fs_encoding* names a registered codec; if *expected* is
        given, also assert exact equality."""
        self.assertIsNotNone(fs_encoding)
        # Raises LookupError for an unknown codec name.
        codecs.lookup(fs_encoding)
        if expected:
            self.assertEqual(fs_encoding, expected)
def test_getfilesystemencoding(self):
fs_encoding = sys.getfilesystemencoding()
if sys.platform == 'darwin':
expected = 'utf-8'
else:
expected = None
self.check_fsencoding(fs_encoding, expected)
def c_locale_get_error_handler(self, locale, isolated=False, encoding=None):
# Force the POSIX locale
env = os.environ.copy()
env["LC_ALL"] = locale
env["PYTHONCOERCECLOCALE"] = "0"
code = '\n'.join((
'import sys',
'def dump(name):',
' std = getattr(sys, name)',
' print("%s: %s" % (name, std.errors))',
'dump("stdin")',
'dump("stdout")',
'dump("stderr")',
))
args = [sys.executable, "-X", "utf8=0", "-c", code]
if isolated:
args.append("-I")
if encoding is not None:
env['PYTHONIOENCODING'] = encoding
else:
env.pop('PYTHONIOENCODING', None)
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
env=env,
universal_newlines=True)
stdout, stderr = p.communicate()
return stdout
def check_locale_surrogateescape(self, locale):
out = self.c_locale_get_error_handler(locale, isolated=True)
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
# replace the default error handler
out = self.c_locale_get_error_handler(locale, encoding=':ignore')
self.assertEqual(out,
'stdin: ignore\n'
'stdout: ignore\n'
'stderr: backslashreplace\n')
# force the encoding
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='iso8859-1:')
self.assertEqual(out,
'stdin: strict\n'
'stdout: strict\n'
'stderr: backslashreplace\n')
# have no any effect
out = self.c_locale_get_error_handler(locale, encoding=':')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
out = self.c_locale_get_error_handler(locale, encoding='')
self.assertEqual(out,
'stdin: surrogateescape\n'
'stdout: surrogateescape\n'
'stderr: backslashreplace\n')
    def test_c_locale_surrogateescape(self):
        """Std stream error handlers under the plain C locale."""
        self.check_locale_surrogateescape('C')
    def test_posix_locale_surrogateescape(self):
        """Std stream error handlers under the POSIX locale."""
        self.check_locale_surrogateescape('POSIX')
def test_implementation(self):
# This test applies to all implementations equally.
levels = {'alpha': 0xA, 'beta': 0xB, 'candidate': 0xC, 'final': 0xF}
self.assertTrue(hasattr(sys.implementation, 'name'))
self.assertTrue(hasattr(sys.implementation, 'version'))
self.assertTrue(hasattr(sys.implementation, 'hexversion'))
self.assertTrue(hasattr(sys.implementation, 'cache_tag'))
version = sys.implementation.version
self.assertEqual(version[:2], (version.major, version.minor))
hexversion = (version.major << 24 | version.minor << 16 |
version.micro << 8 | levels[version.releaselevel] << 4 |
version.serial << 0)
self.assertEqual(sys.implementation.hexversion, hexversion)
# PEP 421 requires that .name be lower case.
self.assertEqual(sys.implementation.name,
sys.implementation.name.lower())
    @test.support.cpython_only
    def test_debugmallocstats(self):
        """sys._debugmallocstats() dumps allocator statistics to stderr and
        accepts no arguments."""
        # Test sys._debugmallocstats()
        from test.support.script_helper import assert_python_ok
        args = ['-c', 'import sys; sys._debugmallocstats()']
        ret, out, err = assert_python_ok(*args)
        self.assertIn(b"free PyDictObjects", err)
        # The function has no parameter
        self.assertRaises(TypeError, sys._debugmallocstats, True)
    @unittest.skipUnless(hasattr(sys, "getallocatedblocks"),
                         "sys.getallocatedblocks unavailable on this build")
    def test_getallocatedblocks(self):
        """sys.getallocatedblocks() returns a plausible, roughly stable
        count of live allocated blocks (positive when pymalloc is in use)."""
        try:
            import _testcapi
        except ImportError:
            # NOTE(review): bare `support` here while the rest of the file
            # uses `test.support` — relies on a `from test import support`
            # at the top of the file; verify it exists.
            with_pymalloc = support.with_pymalloc()
        else:
            try:
                alloc_name = _testcapi.pymem_getallocatorsname()
            except RuntimeError as exc:
                # "cannot get allocators name" (ex: tracemalloc is used)
                with_pymalloc = True
            else:
                with_pymalloc = (alloc_name in ('pymalloc', 'pymalloc_debug'))
        # Some sanity checks
        a = sys.getallocatedblocks()
        self.assertIs(type(a), int)
        if with_pymalloc:
            self.assertGreater(a, 0)
        else:
            # When WITH_PYMALLOC isn't available, we don't know anything
            # about the underlying implementation: the function might
            # return 0 or something greater.
            self.assertGreaterEqual(a, 0)
        try:
            # While we could imagine a Python session where the number of
            # multiple buffer objects would exceed the sharing of references,
            # it is unlikely to happen in a normal test run.
            self.assertLess(a, sys.gettotalrefcount())
        except AttributeError:
            # gettotalrefcount() not available
            pass
        gc.collect()
        b = sys.getallocatedblocks()
        self.assertLessEqual(b, a)
        gc.collect()
        c = sys.getallocatedblocks()
        # Allow a small tolerance: the test machinery itself allocates.
        self.assertIn(c, range(b - 50, b + 50))
    @test.support.requires_type_collecting
    def test_is_finalizing(self):
        """sys.is_finalizing() is False normally and True inside a __del__
        that runs during interpreter shutdown."""
        self.assertIs(sys.is_finalizing(), False)
        # Don't use the atexit module because _Py_Finalizing is only set
        # after calling atexit callbacks
        code = """if 1:
            import sys
            class AtExit:
                is_finalizing = sys.is_finalizing
                print = print
                def __del__(self):
                    self.print(self.is_finalizing(), flush=True)
            # Keep a reference in the __main__ module namespace, so the
            # AtExit destructor will be called at Python exit
            ref = AtExit()
        """
        rc, stdout, stderr = assert_python_ok('-c', code)
        self.assertEqual(stdout.rstrip(), b'True')
    @test.support.requires_type_collecting
    def test_issue20602(self):
        """Issue #20602: sys.flags and sys.float_info must stay usable from
        a __del__ running at interpreter shutdown."""
        # sys.flags and sys.float_info were wiped during shutdown.
        code = """if 1:
            import sys
            class A:
                def __del__(self, sys=sys):
                    print(sys.flags)
                    print(sys.float_info)
            a = A()
            """
        rc, out, err = assert_python_ok('-c', code)
        out = out.splitlines()
        self.assertIn(b'sys.flags', out[0])
        self.assertIn(b'sys.float_info', out[1])
@unittest.skipUnless(hasattr(sys, 'getandroidapilevel'),
'need sys.getandroidapilevel()')
def test_getandroidapilevel(self):
level = sys.getandroidapilevel()
self.assertIsInstance(level, int)
self.assertGreater(level, 0)
    def test_sys_tracebacklimit(self):
        """sys.tracebacklimit caps how many stack entries the default
        excepthook prints; values <= 0 suppress the traceback entirely."""
        code = """if 1:
            import sys
            def f1():
                1 / 0
            def f2():
                f1()
            sys.tracebacklimit = %r
            f2()
        """
        def check(tracebacklimit, expected):
            # Run the snippet in a child and compare its stderr lines.
            p = subprocess.Popen([sys.executable, '-c', code % tracebacklimit],
                                 stderr=subprocess.PIPE)
            out = p.communicate()[1]
            self.assertEqual(out.splitlines(), expected)
        traceback = [
            b'Traceback (most recent call last):',
            b'  File "<string>", line 8, in <module>',
            b'  File "<string>", line 6, in f2',
            b'  File "<string>", line 4, in f1',
            b'ZeroDivisionError: division by zero'
        ]
        check(10, traceback)
        check(3, traceback)
        # Limits below the stack depth drop the innermost entries.
        check(2, traceback[:1] + traceback[2:])
        check(1, traceback[:1] + traceback[3:])
        check(0, [traceback[-1]])
        check(-1, [traceback[-1]])
        check(1<<1000, traceback)
        check(-1<<1000, [traceback[-1]])
        # None restores the default (full traceback).
        check(None, traceback)
    def test_no_duplicates_in_meta_path(self):
        """sys.meta_path must not contain duplicate finder entries."""
        self.assertEqual(len(sys.meta_path), len(set(sys.meta_path)))
    @unittest.skipUnless(hasattr(sys, "_enablelegacywindowsfsencoding"),
                         'needs sys._enablelegacywindowsfsencoding()')
    def test__enablelegacywindowsfsencoding(self):
        """On Windows, sys._enablelegacywindowsfsencoding() switches the
        filesystem encoding to mbcs/replace in a fresh interpreter."""
        code = ('import sys',
                'sys._enablelegacywindowsfsencoding()',
                'print(sys.getfilesystemencoding(), sys.getfilesystemencodeerrors())')
        rc, out, err = assert_python_ok('-c', '; '.join(code))
        out = out.decode('ascii', 'replace').rstrip()
        self.assertEqual(out, 'mbcs replace')
@test.support.cpython_only
class UnraisableHookTest(unittest.TestCase):
    """Tests for sys.unraisablehook / sys.__unraisablehook__."""
    def write_unraisable_exc(self, exc, err_msg, obj):
        """Fire *exc* through _testcapi.write_unraisable_exc() and return a
        namespace describing the arguments the hook should have received."""
        import _testcapi
        import types
        err_msg2 = f"Exception ignored {err_msg}"
        try:
            _testcapi.write_unraisable_exc(exc, err_msg, obj)
            return types.SimpleNamespace(exc_type=type(exc),
                                         exc_value=exc,
                                         exc_traceback=exc.__traceback__,
                                         err_msg=err_msg2,
                                         object=obj)
        finally:
            # Explicitly break any reference cycle
            exc = None
    def test_original_unraisablehook(self):
        """The default hook writes 'Exception ignored ...' plus a traceback
        to stderr, honoring an optional custom error message."""
        for err_msg in (None, "original hook"):
            with self.subTest(err_msg=err_msg):
                obj = "an object"
                with test.support.captured_output("stderr") as stderr:
                    with test.support.swap_attr(sys, 'unraisablehook',
                                                sys.__unraisablehook__):
                        self.write_unraisable_exc(ValueError(42), err_msg, obj)
                err = stderr.getvalue()
                if err_msg is not None:
                    self.assertIn(f'Exception ignored {err_msg}: {obj!r}\n', err)
                else:
                    self.assertIn(f'Exception ignored in: {obj!r}\n', err)
                self.assertIn('Traceback (most recent call last):\n', err)
                self.assertIn('ValueError: 42\n', err)
    def test_original_unraisablehook_err(self):
        """bpo-22836: reports stay sensible when __del__ raises, even if the
        exception's own __str__ fails."""
        # bpo-22836: PyErr_WriteUnraisable() should give sensible reports
        class BrokenDel:
            def __del__(self):
                exc = ValueError("del is broken")
                # The following line is included in the traceback report:
                raise exc
        class BrokenStrException(Exception):
            def __str__(self):
                raise Exception("str() is broken")
        class BrokenExceptionDel:
            def __del__(self):
                exc = BrokenStrException()
                # The following line is included in the traceback report:
                raise exc
        for test_class in (BrokenDel, BrokenExceptionDel):
            with self.subTest(test_class):
                obj = test_class()
                with test.support.captured_stderr() as stderr, \
                     test.support.swap_attr(sys, 'unraisablehook',
                                            sys.__unraisablehook__):
                    # Trigger obj.__del__()
                    del obj
                report = stderr.getvalue()
                self.assertIn("Exception ignored", report)
                self.assertIn(test_class.__del__.__qualname__, report)
                self.assertIn("test_sys.py", report)
                self.assertIn("raise exc", report)
                if test_class is BrokenExceptionDel:
                    self.assertIn("BrokenStrException", report)
                    self.assertIn("<exception str() failed>", report)
                else:
                    self.assertIn("ValueError", report)
                    self.assertIn("del is broken", report)
                self.assertTrue(report.endswith("\n"))
    def test_original_unraisablehook_wrong_type(self):
        """The default hook rejects arguments of the wrong type."""
        exc = ValueError(42)
        with test.support.swap_attr(sys, 'unraisablehook',
                                    sys.__unraisablehook__):
            with self.assertRaises(TypeError):
                sys.unraisablehook(exc)
    def test_custom_unraisablehook(self):
        """A replacement hook receives the exc_type/exc_value/exc_traceback/
        err_msg/object fields produced by the C API."""
        hook_args = None
        def hook_func(args):
            nonlocal hook_args
            hook_args = args
        obj = object()
        try:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                expected = self.write_unraisable_exc(ValueError(42),
                                                     "custom hook", obj)
                for attr in "exc_type exc_value exc_traceback err_msg object".split():
                    self.assertEqual(getattr(hook_args, attr),
                                     getattr(expected, attr),
                                     (hook_args, expected))
        finally:
            # expected and hook_args contain an exception: break reference cycle
            expected = None
            hook_args = None
    def test_custom_unraisablehook_fail(self):
        """If the custom hook itself raises, the failure is reported via the
        default machinery on stderr."""
        def hook_func(*args):
            raise Exception("hook_func failed")
        with test.support.captured_output("stderr") as stderr:
            with test.support.swap_attr(sys, 'unraisablehook', hook_func):
                self.write_unraisable_exc(ValueError(42),
                                          "custom hook fail", None)
        err = stderr.getvalue()
        self.assertIn(f'Exception ignored in sys.unraisablehook: '
                      f'{hook_func!r}\n',
                      err)
        self.assertIn('Traceback (most recent call last):\n', err)
        self.assertIn('Exception: hook_func failed\n', err)
@test.support.cpython_only
class SizeofTest(unittest.TestCase):
    """Checks sys.getsizeof() against hand-computed struct layouts for the
    built-in types (CPython-specific: sizes track the C object structs)."""
    def setUp(self):
        # self.P: size of a C pointer, the unit for most object layouts.
        self.P = struct.calcsize('P')
        # self.longdigit: bytes per int digit on this build.
        self.longdigit = sys.int_info.sizeof_digit
        import _testcapi
        self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
    # Bound helper: check_sizeof(self, obj, expected_size).
    check_sizeof = test.support.check_sizeof
    def test_gc_head_size(self):
        """GC-tracked objects (lists) must include the GC header in their
        reported size; untracked objects (bool) must not."""
        # Check that the gc header size is added to objects tracked by the gc.
        vsize = test.support.calcvobjsize
        gc_header_size = self.gc_headsize
        # bool objects are not gc tracked
        self.assertEqual(sys.getsizeof(True), vsize('') + self.longdigit)
        # but lists are
        self.assertEqual(sys.getsizeof([]), vsize('Pn') + gc_header_size)
def test_errors(self):
class BadSizeof:
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof:
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class FloatSizeof:
def __sizeof__(self):
return 4.5
self.assertRaises(TypeError, sys.getsizeof, FloatSizeof())
self.assertIs(sys.getsizeof(FloatSizeof(), sentinel), sentinel)
class OverflowSizeof(int):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
    def test_default(self):
        """The *default* argument is ignored when __sizeof__ succeeds."""
        size = test.support.calcvobjsize
        self.assertEqual(sys.getsizeof(True), size('') + self.longdigit)
        self.assertEqual(sys.getsizeof(True, -1), size('') + self.longdigit)
    def test_objecttypes(self):
        """Spot-check sys.getsizeof() for each object type defined in
        Objects/, using struct-format strings that mirror the C layouts."""
        # check all types defined in Objects/
        calcsize = struct.calcsize
        size = test.support.calcobjsize
        vsize = test.support.calcvobjsize
        check = self.check_sizeof
        # bool
        check(True, vsize('') + self.longdigit)
        # buffer
        # XXX
        # builtin_function_or_method
        check(len, size('5P'))
        # bytearray
        samples = [b'', b'u'*100000]
        for sample in samples:
            x = bytearray(sample)
            check(x, vsize('n2Pi') + x.__alloc__())
        # bytearray_iterator
        check(iter(bytearray()), size('nP'))
        # bytes
        check(b'', vsize('n') + 1)
        check(b'x' * 10, vsize('n') + 11)
        # cell
        def get_cell():
            x = 42
            def inner():
                return x
            return inner
        check(get_cell().__closure__[0], size('P'))
        # code: only a lower bound, the exact size varies per code object
        def check_code_size(a, expected_size):
            self.assertGreaterEqual(sys.getsizeof(a), expected_size)
        check_code_size(get_cell().__code__, size('6i13P'))
        check_code_size(get_cell.__code__, size('6i13P'))
        def get_cell2(x):
            def inner():
                return x
            return inner
        check_code_size(get_cell2.__code__, size('6i13P') + calcsize('n'))
        # complex
        check(complex(0,1), size('2d'))
        # method_descriptor (descriptor object)
        check(str.lower, size('3PPP'))
        # classmethod_descriptor (descriptor object)
        # XXX
        # member_descriptor (descriptor object)
        import datetime
        check(datetime.timedelta.days, size('3PP'))
        # getset_descriptor (descriptor object)
        import collections
        check(collections.defaultdict.default_factory, size('3PP'))
        # wrapper_descriptor (descriptor object)
        check(int.__add__, size('3P2P'))
        # method-wrapper (descriptor object)
        check({}.__iter__, size('2P'))
        # empty dict
        check({}, size('nQ2P'))
        # dict
        check({"a": 1}, size('nQ2P') + calcsize('2nP2n') + 8 + (8*2//3)*calcsize('n2P'))
        longdict = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
        check(longdict, size('nQ2P') + calcsize('2nP2n') + 16 + (16*2//3)*calcsize('n2P'))
        # dictionary-keyview
        check({}.keys(), size('P'))
        # dictionary-valueview
        check({}.values(), size('P'))
        # dictionary-itemview
        check({}.items(), size('P'))
        # dictionary iterator
        check(iter({}), size('P2nPn'))
        # dictionary-keyiterator
        check(iter({}.keys()), size('P2nPn'))
        # dictionary-valueiterator
        check(iter({}.values()), size('P2nPn'))
        # dictionary-itemiterator
        check(iter({}.items()), size('P2nPn'))
        # dictproxy
        class C(object): pass
        check(C.__dict__, size('P'))
        # BaseException
        check(BaseException(), size('5Pb'))
        # UnicodeEncodeError
        check(UnicodeEncodeError("", "", 0, 0, ""), size('5Pb 2P2nP'))
        # UnicodeDecodeError
        check(UnicodeDecodeError("", b"", 0, 0, ""), size('5Pb 2P2nP'))
        # UnicodeTranslateError
        check(UnicodeTranslateError("", 0, 1, ""), size('5Pb 2P2nP'))
        # ellipses
        check(Ellipsis, size(''))
        # EncodingMap
        import codecs, encodings.iso8859_3
        x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
        check(x, size('32B2iB'))
        # enumerate
        check(enumerate([]), size('n3P'))
        # reverse
        check(reversed(''), size('nP'))
        # float
        check(float(0), size('d'))
        # sys.floatinfo
        check(sys.float_info, vsize('') + self.P * len(sys.float_info))
        # frame
        import inspect
        CO_MAXBLOCKS = 20
        x = inspect.currentframe()
        ncells = len(x.f_code.co_cellvars)
        nfrees = len(x.f_code.co_freevars)
        extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
                 ncells + nfrees - 1
        check(x, vsize('5P2c4P3ic' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
        # function
        def func(): pass
        check(func, size('13P'))
        class c():
            @staticmethod
            def foo():
                pass
            @classmethod
            def bar(cls):
                pass
            # staticmethod
            check(foo, size('PP'))
            # classmethod
            check(bar, size('PP'))
        # generator
        def get_gen(): yield 1
        check(get_gen(), size('Pb2PPP4P'))
        # iterator
        check(iter('abc'), size('lP'))
        # callable-iterator
        import re
        check(re.finditer('',''), size('2P'))
        # list
        samples = [[], [1,2,3], ['1', '2', '3']]
        for sample in samples:
            check(sample, vsize('Pn') + len(sample)*self.P)
        # sortwrapper (list)
        # XXX
        # cmpwrapper (list)
        # XXX
        # listiterator (list)
        check(iter([]), size('lP'))
        # listreverseiterator (list)
        check(reversed([]), size('nP'))
        # int
        check(0, vsize(''))
        check(1, vsize('') + self.longdigit)
        check(-1, vsize('') + self.longdigit)
        PyLong_BASE = 2**sys.int_info.bits_per_digit
        check(int(PyLong_BASE), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
        check(int(PyLong_BASE**2), vsize('') + 3*self.longdigit)
        # module
        check(unittest, size('PnPPP'))
        # None
        check(None, size(''))
        # NotImplementedType
        check(NotImplemented, size(''))
        # object
        check(object(), size(''))
        # property (descriptor object)
        class C(object):
            def getx(self): return self.__x
            def setx(self, value): self.__x = value
            def delx(self): del self.__x
            x = property(getx, setx, delx, "")
            check(x, size('4Pi'))
        # PyCapsule
        # XXX
        # rangeiterator
        check(iter(range(1)), size('4l'))
        # reverse
        check(reversed(''), size('nP'))
        # range
        check(range(1), size('4P'))
        check(range(66000), size('4P'))
        # set
        # frozenset
        PySet_MINSIZE = 8
        samples = [[], range(10), range(50)]
        s = size('3nP' + PySet_MINSIZE*'nP' + '2nP')
        for sample in samples:
            minused = len(sample)
            if minused == 0: tmp = 1  # NOTE(review): `tmp` is never used (dead assignment)
            # the computation of minused is actually a bit more complicated
            # but this suffices for the sizeof test
            minused = minused*2
            newsize = PySet_MINSIZE
            while newsize <= minused:
                newsize = newsize << 1
            if newsize <= 8:
                check(set(sample), s)
                check(frozenset(sample), s)
            else:
                check(set(sample), s + newsize*calcsize('nP'))
                check(frozenset(sample), s + newsize*calcsize('nP'))
        # setiterator
        check(iter(set()), size('P3n'))
        # slice
        check(slice(0), size('3P'))
        # super
        check(super(int), size('3P'))
        # tuple
        check((), vsize(''))
        check((1,2,3), vsize('') + 3*self.P)
        # type
        # static type: PyTypeObject
        fmt = 'P2nPI13Pl4Pn9Pn11PIPP'
        if hasattr(sys, 'getcounts'):
            fmt += '3n2P'
        s = vsize(fmt)
        check(int, s)
        # class
        s = vsize(fmt +                 # PyTypeObject
                  '3P'                  # PyAsyncMethods
                  '36P'                 # PyNumberMethods
                  '3P'                  # PyMappingMethods
                  '10P'                 # PySequenceMethods
                  '2P'                  # PyBufferProcs
                  '4P')
        class newstyleclass(object): pass
        # Separate block for PyDictKeysObject with 8 keys and 5 entries
        check(newstyleclass, s + calcsize("2nP2n0P") + 8 + 5*calcsize("n2P"))
        # dict with shared keys
        check(newstyleclass().__dict__, size('nQ2P') + 5*self.P)
        o = newstyleclass()
        o.a = o.b = o.c = o.d = o.e = o.f = o.g = o.h = 1
        # Separate block for PyDictKeysObject with 16 keys and 10 entries
        check(newstyleclass, s + calcsize("2nP2n0P") + 16 + 10*calcsize("n2P"))
        # dict with shared keys
        check(newstyleclass().__dict__, size('nQ2P') + 10*self.P)
        # unicode
        # each tuple contains a string and its expected character size
        # don't put any static strings here, as they may contain
        # wchar_t or UTF-8 representations
        samples = ['1'*100, '\xff'*50,
                   '\u0100'*40, '\uffff'*100,
                   '\U00010000'*30, '\U0010ffff'*100]
        asciifields = "nnbP"
        compactfields = asciifields + "nPn"
        unicodefields = compactfields + "P"
        for s in samples:
            maxchar = ord(max(s))
            if maxchar < 128:
                L = size(asciifields) + len(s) + 1
            elif maxchar < 256:
                L = size(compactfields) + len(s) + 1
            elif maxchar < 65536:
                L = size(compactfields) + 2*(len(s) + 1)
            else:
                L = size(compactfields) + 4*(len(s) + 1)
            check(s, L)
        # verify that the UTF-8 size is accounted for
        s = chr(0x4000)   # 4 bytes canonical representation
        check(s, size(compactfields) + 4)
        # compile() will trigger the generation of the UTF-8
        # representation as a side effect
        compile(s, "<stdin>", "eval")
        check(s, size(compactfields) + 4 + 4)
        # TODO: add check that forces the presence of wchar_t representation
        # TODO: add check that forces layout of unicodefields
        # weakref
        import weakref
        check(weakref.ref(int), size('2Pn2P'))
        # weakproxy
        # XXX
        # weakcallableproxy
        check(weakref.proxy(int), size('2Pn2P'))
def check_slots(self, obj, base, extra):
    """Assert that sys.getsizeof(obj) == sizeof(base) + the added slots.

    *obj* is an instance of a subclass (of type(base)) that declares
    __slots__; *extra* is a struct format string describing the storage
    those slots add (e.g. '3P' for three pointer-sized slots).
    """
    expected = sys.getsizeof(base) + struct.calcsize(extra)
    # If the subclass instance is GC-tracked while the base instance is
    # not, the GC header becomes part of the measured size.
    if gc.is_tracked(obj) and not gc.is_tracked(base):
        expected += self.gc_headsize
    self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
    """sizeof of subclasses that add __slots__: each subclass below adds
    three slots, i.e. three pointers ('3P') on top of the base size."""
    # check all subclassable types defined in Objects/ that allow
    # non-empty __slots__
    check = self.check_slots
    class BA(bytearray):
        __slots__ = 'a', 'b', 'c'
    check(BA(), bytearray(), '3P')
    class D(dict):
        __slots__ = 'a', 'b', 'c'
    check(D(x=[]), {'x': []}, '3P')
    class L(list):
        __slots__ = 'a', 'b', 'c'
    check(L(), [], '3P')
    class S(set):
        __slots__ = 'a', 'b', 'c'
    check(S(), set(), '3P')
    class FS(frozenset):
        __slots__ = 'a', 'b', 'c'
    check(FS(), frozenset(), '3P')
    from collections import OrderedDict
    class OD(OrderedDict):
        __slots__ = 'a', 'b', 'c'
    check(OD(x=[]), OrderedDict(x=[]), '3P')
def test_pythontypes(self):
    """sizeof checks for types implemented in Python/ (AST nodes,
    traceback objects, sys.flags)."""
    # check all types defined in Python/
    size = test.support.calcobjsize
    vsize = test.support.calcvobjsize
    check = self.check_sizeof
    # _ast.AST
    import _ast
    check(_ast.AST(), size('P'))
    try:
        raise TypeError
    except TypeError:
        tb = sys.exc_info()[2]
        # traceback
        if tb is not None:
            check(tb, size('2P2i'))
    # symtable entry
    # XXX
    # sys.flags
    check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_asyncgen_hooks(self):
    """sys.set_asyncgen_hooks()/get_asyncgen_hooks() round-trip:
    hooks default to None, are retrievable both by attribute and by
    index, can be set independently, and can be restored."""
    old = sys.get_asyncgen_hooks()
    self.assertIsNone(old.firstiter)
    self.assertIsNone(old.finalizer)
    firstiter = lambda *a: None
    sys.set_asyncgen_hooks(firstiter=firstiter)
    hooks = sys.get_asyncgen_hooks()
    self.assertIs(hooks.firstiter, firstiter)
    self.assertIs(hooks[0], firstiter)
    self.assertIs(hooks.finalizer, None)
    self.assertIs(hooks[1], None)
    finalizer = lambda *a: None
    sys.set_asyncgen_hooks(finalizer=finalizer)
    hooks = sys.get_asyncgen_hooks()
    self.assertIs(hooks.firstiter, firstiter)
    self.assertIs(hooks[0], firstiter)
    self.assertIs(hooks.finalizer, finalizer)
    self.assertIs(hooks[1], finalizer)
    # Restore the original (None, None) hooks and verify.
    sys.set_asyncgen_hooks(*old)
    cur = sys.get_asyncgen_hooks()
    self.assertIsNone(cur.firstiter)
    self.assertIsNone(cur.finalizer)
if __name__ == "__main__":
unittest.main()
|
chat_server.py | """
Created on Tue Jul 22 00:47:05 2014
@author: alina, zzhang
"""
import json
import pickle as pkl
import select
import socket
import chat_group as grp
import indexer
from chat_utils import *
# for two server
from threading import Thread
import file_server
class Server:
    """select()-based chat server.

    Tracks three socket populations: not-yet-logged-in sockets
    (new_clients), logged-in sockets (logged_name2sock / logged_sock2name)
    and the listening socket itself.  Each logged-in user has a persistent
    chat-history index that is loaded on login and saved on logout.
    """

    def __init__(self):
        self.new_clients = []  # list of new sockets of which the user id is not known
        self.logged_name2sock = {}  # dictionary mapping username to socket
        self.logged_sock2name = {}  # dict mapping socket to user name
        self.all_sockets = []
        self.group = grp.Group()
        # start server
        self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.server.bind(SERVER)
        self.server.listen(5)
        self.all_sockets.append(self.server)
        # initialize past chat indices
        self.indices = {}
        # sonnet
        # self.sonnet_f = open('AllSonnets.txt.idx', 'rb')
        # self.sonnet = pkl.load(self.sonnet_f)
        # self.sonnet_f.close()
        self.sonnet = indexer.PIndex("AllSonnets.txt")

    def new_client(self, sock):
        """Register a freshly accepted socket whose user is not known yet."""
        # add to all sockets and to new clients
        print('new client...')
        sock.setblocking(0)
        self.new_clients.append(sock)
        self.all_sockets.append(sock)

    def login(self, sock):
        """Process a login attempt arriving on a new (not logged-in) socket."""
        # read the msg that should have login code plus username
        try:
            msg = json.loads(myrecv(sock))
            print("login:", msg)
            if len(msg) > 0:
                if msg["action"] == "login":
                    name = msg["name"]
                    if self.group.is_member(name) != True:
                        # move socket from new clients list to logged clients
                        self.new_clients.remove(sock)
                        # add into the name to sock mapping
                        self.logged_name2sock[name] = sock
                        self.logged_sock2name[sock] = name
                        # load chat history of that user
                        if name not in self.indices.keys():
                            try:
                                self.indices[name] = pkl.load(open(name + '.idx', 'rb'))
                            except IOError:  # chat index does not exist, then create one
                                self.indices[name] = indexer.Index(name)
                        print(name + ' logged in')
                        self.group.join(name)
                        mysend(sock, json.dumps({"action": "login", "status": "ok"}))
                    else:  # a client under this name has already logged in
                        mysend(sock, json.dumps({"action": "login", "status": "duplicate"}))
                        print(name + ' duplicate login attempt')
                else:
                    print('wrong code received')
            else:  # client died unexpectedly
                self.logout(sock)
        except:
            # NOTE(review): bare except hides the real error; any failure
            # here is treated as a dead client and the socket is dropped.
            self.all_sockets.remove(sock)

    def logout(self, sock):
        """Persist the user's chat index and remove the socket everywhere."""
        # remove sock from all lists
        name = self.logged_sock2name[sock]
        pkl.dump(self.indices[name], open(name + '.idx', 'wb'))
        del self.indices[name]
        del self.logged_name2sock[name]
        del self.logged_sock2name[sock]
        self.all_sockets.remove(sock)
        self.group.leave(name)
        sock.close()

    # ==============================================================================
    # main command switchboard
    # ==============================================================================
    def handle_msg(self, from_sock):
        """Dispatch one request from a logged-in client by its "action" field."""
        # read msg code
        msg = myrecv(from_sock)
        if len(msg) > 0:
            # ==============================================================================
            # handle connect request
            # ==============================================================================
            msg = json.loads(msg)
            if msg["action"] == "connect":
                to_name = msg["target"]
                from_name = self.logged_sock2name[from_sock]
                if to_name == from_name:
                    msg = json.dumps({"action": "connect", "status": "self"})
                # connect to the peer
                elif self.group.is_member(to_name):
                    to_sock = self.logged_name2sock[to_name]
                    self.group.connect(from_name, to_name)
                    the_guys = self.group.list_me(from_name)
                    msg = json.dumps({"action": "connect", "status": "success"})
                    for g in the_guys[1:]:
                        to_sock = self.logged_name2sock[g]
                        mysend(to_sock, json.dumps({"action": "connect", "status": "request", "from": from_name}))
                else:
                    msg = json.dumps({"action": "connect", "status": "no-user"})
                mysend(from_sock, msg)
            # ==============================================================================
            # handle message exchange: one peer for now. will need multicast later
            # ==============================================================================
            elif msg["action"] == "exchange":
                from_name = self.logged_sock2name[from_sock]
                the_guys = self.group.list_me(from_name)
                # said = msg["from"]+msg["message"]
                said2 = text_proc(msg["message"], from_name)
                self.indices[from_name].add_msg_and_index(said2)
                for g in the_guys[1:]:
                    to_sock = self.logged_name2sock[g]
                    self.indices[g].add_msg_and_index(said2)
                    mysend(to_sock, json.dumps({"action": "exchange", "from": msg["from"], "message": msg["message"]}))
            # ==============================================================================
            # listing available peers
            # ==============================================================================
            elif msg["action"] == "list":
                from_name = self.logged_sock2name[from_sock]
                msg = self.group.list_all()
                mysend(from_sock, json.dumps({"action": "list", "results": msg}))
            # ==============================================================================
            # retrieve a sonnet
            # ==============================================================================
            elif msg["action"] == "poem":
                poem_indx = int(msg["target"])
                from_name = self.logged_sock2name[from_sock]
                print(from_name + ' asks for ', poem_indx)
                poem = self.sonnet.get_poem(poem_indx)
                poem = '\n'.join(poem).strip()
                print('here:\n', poem)
                mysend(from_sock, json.dumps({"action": "poem", "results": poem}))
            # ==============================================================================
            # time
            # ==============================================================================
            elif msg["action"] == "time":
                ctime = time.strftime('%d.%m.%y,%H:%M', time.localtime())
                mysend(from_sock, json.dumps({"action": "time", "results": ctime}))
            # ==============================================================================
            # search
            # ==============================================================================
            elif msg["action"] == "search":
                term = msg["target"]
                from_name = self.logged_sock2name[from_sock]
                print('search for ' + from_name + ' for ' + term)
                # search_rslt = (self.indices[from_name].search(term))
                search_rslt = '\n'.join([x[-1] for x in self.indices[from_name].search(term)])
                print('server side search: ' + search_rslt)
                mysend(from_sock, json.dumps({"action": "search", "results": search_rslt}))
            # ==============================================================================
            # the "from" guy has had enough (talking to "to")!
            # ==============================================================================
            elif msg["action"] == "disconnect":
                from_name = self.logged_sock2name[from_sock]
                the_guys = self.group.list_me(from_name)
                self.group.disconnect(from_name)
                the_guys.remove(from_name)
                if len(the_guys) == 1:  # only one left
                    g = the_guys.pop()
                    to_sock = self.logged_name2sock[g]
                    mysend(to_sock, json.dumps({"action": "disconnect"}))
        # ==============================================================================
        # the "from" guy really, really has had enough
        # ==============================================================================
        else:
            # client died unexpectedly
            self.logout(from_sock)

    # ==============================================================================
    # main loop, loops *forever*
    # ==============================================================================
    def run(self):
        """select() loop: serve logged clients, pending logins and new
        connections, forever."""
        print('starting server...')
        while (1):
            read, write, error = select.select(self.all_sockets, [], [])
            print('checking logged clients..')
            for logc in list(self.logged_name2sock.values()):
                if logc in read:
                    self.handle_msg(logc)
            print('checking new clients..')
            for newc in self.new_clients[:]:
                if newc in read:
                    self.login(newc)
            print('checking for new connections..')
            if self.server in read:
                # new client request
                sock, address = self.server.accept()
                self.new_client(sock)
def main():
    """Launch the file server on a background thread, then run the chat
    server in the current thread (blocks forever)."""
    file_thread = Thread(target=file_server.main)
    file_thread.start()
    Server().run()


if __name__ == '__main__':
    main()
|
free.py | # -*- coding: utf-8 -*-
"""
Created on Sat Dec 3 03:57:53 2016
@author: yxl
"""
import threading
from time import time
class Free:
    """Base class for asynchronous plugins.

    Subclasses override run(); start() binds the host app, optionally
    shows a parameter dialog, then executes run() on a background thread
    and reports the elapsed time through the app.
    """
    title = 'Free'     # display name of the plugin
    view = None        # parameter view description (None means no dialog)
    para = None        # default parameter set
    prgs = (None, 1)   # progress as (current step, total steps)
    asyn = True        # plugin is meant to run asynchronously

    def progress(self, i, n):
        """Record progress: step i of n."""
        self.prgs = (i, n)

    def run(self, para=None):
        """Do the actual work; subclasses override this."""
        print('this is a plugin')

    def runasyn(self, para, callback=None):
        """Run the plugin, report elapsed time, then fire the callback."""
        start = time()
        self.run(para)
        self.app.info('%s: cost %.3fs' % (self.title, time() - start))
        # Idiom fix: compare against None with `is not`, not `!=`.
        if callback is not None:
            callback()

    def load(self):
        """Hook called before showing the dialog; return False to abort."""
        return True

    def show(self):
        """Show the parameter dialog if one is defined; True means proceed."""
        if self.view is None:
            return True
        return self.app.show_para(self.title, self.view, self.para, None)

    def start(self, app, para=None, callback=None):
        """Entry point: bind *app*, then run asynchronously.

        If *para* is given the dialog is skipped; otherwise show() must
        approve before the default parameters are used.
        """
        self.app = app
        if not self.load():
            return
        if para is not None or self.show():
            if para is None:
                para = self.para
            threading.Thread(target=self.runasyn, args=(para, callback)).start()
|
Lidar.py | '''
雷达类
基于官方ROS驱动,本脚本只提供订阅"/livox/lidar"topic的后续处理
'''
# In case the ROS driver fails to load, DepthQueue can still be tested
# from the main program with saved point clouds.
try:
import rospy
from sensor_msgs.msg import PointCloud2
from sensor_msgs import point_cloud2
except:
print("[ERROR] ROS environment hasn't been successfully loaded.You can only use DepthQueue with saved PointCloud")
import cv2
import os
import numpy as np
import threading
from queue import Queue
import ctypes
import inspect
import time
from datetime import datetime
import pickle as pkl
from radar_class.config import PC_STORE_DIR,LIDAR_TOPIC_NAME
class DepthQueue(object):
    """Rolling depth map built from a bounded queue of lidar point clouds.

    Each incoming cloud is projected onto the image plane; the depth at a
    projected pixel is the minimum camera-frame z seen among the clouds
    currently held in the queue.
    """

    def __init__(self, capacity, size, K_0, C_0, E_0):
        '''
        Store point clouds in a bounded queue.

        :param capacity: the maximum length of the depth queue
        :param size: image size [W, H]
        :param K_0: camera intrinsic matrix
        :param C_0: distortion coefficients
        :param E_0: lidar-to-camera extrinsic matrix (4x4)
        '''
        self.size = size
        # Depth map starts all-NaN: no measurement yet at any pixel.
        self.depth = np.ones((size[1], size[0]), np.float64) * np.nan
        self.queue = Queue(capacity)
        self.K_0 = K_0
        self.C_0 = C_0
        self.rvec = cv2.Rodrigues(E_0[:3, :3])[0]
        self.tvec = E_0[:3, 3]
        self.E_0 = E_0
        self.init_flag = False

    def push_back(self, pc: np.ndarray):
        """Fuse a new (N, 3) point cloud into the depth map, evicting the
        oldest cloud when the queue is full."""
        # An empty queue means this object is being initialized: set the flag.
        if self.queue.empty():
            self.init_flag = True
        # Transform lidar coordinates to camera coordinates; keep each
        # point's z coordinate in the camera frame.
        dpt = (self.E_0 @ (np.concatenate([pc, np.ones((pc.shape[0], 1))], axis=1).transpose())).transpose()[:, 2]
        # Project the cloud onto the pixel plane.
        # Fix: `np.int` was a deprecated alias removed in NumPy 1.24;
        # use the builtin `int` dtype instead.
        ip = cv2.projectPoints(pc, self.rvec, self.tvec, self.K_0, self.C_0)[0].reshape(-1, 2).astype(int)
        # Keep only the projections that fall inside the image.
        inside = np.logical_and(np.logical_and(ip[:, 0] >= 0, ip[:, 0] < self.size[0]),
                                np.logical_and(ip[:, 1] >= 0, ip[:, 1] < self.size[1]))
        ip = ip[inside]
        dpt = dpt[inside]
        # Enqueue the projected pixel positions [N, 2].
        self.queue.put(ip)
        if self.queue.full():
            # Queue full: pop the oldest cloud and reset its pixels to NaN.
            ip_d = self.queue.get()
            self.depth[ip_d[:, 1], ip_d[:, 0]] = np.nan
        # TODO: with occlusion it is ambiguous whether the front or the back
        # surface is measured.
        # Update policy: keep the smaller of the existing depth and the new z.
        s = np.stack([self.depth[ip[:, 1], ip[:, 0]], dpt], axis=1)
        s = np.nanmin(s, axis=1)
        self.depth[ip[:, 1], ip[:, 0]] = s

    def depth_detect_refine(self, r):
        '''
        :param r: the bounding box of an armor plate, format (x0, y0, w, h)
        :return: (x0, y0, z) where x0, y0 are the center point on the
            normalized camera plane and z is its camera-frame z coordinate
        '''
        center = np.float32([r[0] + r[2] / 2, r[1] + r[3] / 2])
        # Use an ROI twice the armor box, centered on it, clamped to the
        # image bounds.
        area = self.depth[int(max(0, center[1] - r[3])):int(min(center[1] + r[3], self.size[1] - 1)),
                          int(max(center[0] - r[2], 0)):int(min(center[0] + r[2], self.size[0] - 1))]
        z = np.nanmean(area) if not np.isnan(area).all() else np.nan  # all-NaN ROI yields NaN
        return np.concatenate([cv2.undistortPoints(center, self.K_0, self.C_0).reshape(-1), np.array([z])], axis=0)

    def detect_depth(self, rects):
        '''
        :param rects: list of armor bounding boxes, format (x0, y0, w, h)
        :return: an array whose first dimension is the number of armors and
            whose second holds (x0, y0, z) as in depth_detect_refine;
            an empty list when no boxes are given
        '''
        if len(rects) == 0:
            return []
        ops = []
        for rect in rects:
            ops.append(self.depth_detect_refine(rect))
        return np.stack(ops, axis=0)
# 安全关闭子线程
def _async_raise(tid, exctype):
"""raises the exception, performs cleanup if needed"""
tid = ctypes.c_long(tid)
if not inspect.isclass(exctype):
exctype = type(exctype)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
if res == 0:
raise ValueError("invalid thread id")
elif res != 1:
# """if it returns a number greater than one, you're in trouble,
# and you should call it again with exc=NULL to revert the effect"""
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
raise SystemError("PyThreadState_SetAsyncExc failed")
def stop_thread(thread):
    """Terminate *thread* by asynchronously raising SystemExit in it."""
    _async_raise(thread.ident, SystemExit)
class Radar(object):
    """Per-camera lidar handler.

    All class-level members are shared by every Radar object: a single
    ROS subscriber thread feeds the DepthQueue of each object.
    """
    # the global members of the Radar class
    __init_flag = False  # whether the lidar listener has been started
    __working_flag = False  # whether the receiver thread is running
    __threading = None  # the lidar receiver child thread
    __lock = threading.Lock()  # lock guarding the shared queues
    __queue = []  # list holding each Radar object's DepthQueue
    __record_times = 0  # number of point clouds recorded so far
    __record_list = []
    __record_max_times = 100  # maximum number of point clouds to record

    def __init__(self, K_0, C_0, E_0, queue_size=200, imgsz=(3088, 2064)):
        '''
        Lidar processing class; create one object per camera.

        :param K_0: camera intrinsic matrix
        :param C_0: distortion coefficients
        :param E_0: lidar-to-camera extrinsic matrix
        :param queue_size: maximum queue length
        :param imgsz: camera image size
        '''
        if not Radar.__init_flag:
            # First Radar object: initialize the ROS listener node.
            Radar.__laser_listener_begin(LIDAR_TOPIC_NAME)
            Radar.__init_flag = True
            Radar.__threading = threading.Thread(target=Radar.__main_loop, daemon=True)
        self._no = len(Radar.__queue)  # index of this object in the global queue list
        self._K_0 = K_0
        self._C_0 = C_0
        Radar.__queue.append(DepthQueue(queue_size, imgsz, K_0, C_0, E_0))

    @staticmethod
    def start():
        '''
        Start the receiver child thread, i.e. begin to spin.
        '''
        if not Radar.__working_flag:
            Radar.__threading.start()
            Radar.__working_flag = True

    @staticmethod
    def stop():
        '''
        Stop the receiver child thread.
        '''
        if Radar.__working_flag:
            stop_thread(Radar.__threading)
            Radar.__working_flag = False

    @staticmethod
    def __callback(data):
        '''
        Child-thread callback processing data from the lidar topic.
        '''
        if Radar.__working_flag:
            Radar.__lock.acquire()
            pc = np.float32(point_cloud2.read_points_list(data, field_names=("x", "y", "z"), skip_nans=True)).reshape(-1, 3)
            dist = np.linalg.norm(pc, axis=1)
            pc = pc[dist > 0.4]  # drop points too close to the lidar
            # do record
            if Radar.__record_times > 0:
                Radar.__record_list.append(pc)
                print("[INFO] recording point cloud {0}/{1}".format(Radar.__record_max_times - Radar.__record_times, Radar.__record_max_times))
                if Radar.__record_times == 1:
                    # Last cloud of the batch: dump the whole recording.
                    try:
                        if not os.path.exists(PC_STORE_DIR):
                            os.mkdir(PC_STORE_DIR)
                        with open("{0}/{1}.pkl"
                                  .format(PC_STORE_DIR,
                                          datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M')),
                                  'wb') as f:
                            pkl.dump(Radar.__record_list, f)
                        Radar.__record_list.clear()
                        print("[INFO] record finished")
                    except:  # e.g. the disk is not mounted and the folder cannot be created
                        print("[ERROR] The point cloud save dir even doesn't exist on this computer!")
                Radar.__record_times -= 1
            # update every class object's queue
            for q in Radar.__queue:
                q.push_back(pc)
            Radar.__lock.release()

    @staticmethod
    def __laser_listener_begin(laser_node_name="/livox/lidar"):
        rospy.init_node('laser_listener', anonymous=True)
        rospy.Subscriber(laser_node_name, PointCloud2, Radar.__callback)

    @staticmethod
    def __main_loop():
        # Run spin() in the child thread so it does not block the main thread.
        rospy.spin()
        # Once spin() is called the subscriber keeps polling the subscribed
        # topic, i.e. the callback is invoked repeatedly.

    @staticmethod
    def start_record():
        '''
        Start recording point clouds.
        '''
        if Radar.__record_times == 0:
            Radar.__record_times = Radar.__record_max_times

    def detect_depth(self, rects):
        '''
        Interface: given armor bounding boxes, return their (x0, y0, z_c).
        Note: x0, y0 here are coordinates on the normalized camera plane,
        unlike the top-left corner meaning in the parameter below.

        :param rects: armor bounding box, format: (x0, y0, w, h)
        '''
        Radar.__lock.acquire()
        # self._no selects this object's own depth queue.
        results = Radar.__queue[self._no].detect_depth(rects)
        Radar.__lock.release()
        return results

    def read(self):
        '''
        For debugging: return the depth queue's current depth map.
        '''
        Radar.__lock.acquire()
        depth = Radar.__queue[self._no].depth.copy()
        Radar.__lock.release()
        return depth

    def check_radar_init(self):
        '''
        Check (and clear) the bound queue's init flag to determine whether
        the lidar is feeding data.
        '''
        if Radar.__queue[self._no].init_flag:
            Radar.__queue[self._no].init_flag = False
            return True
        else:
            return False

    def __del__(self):
        Radar.stop()
if __name__ == '__main__':
    # Demo; also a handy script for testing the range measurement.
    from radar_class.camera import read_yaml, Camera_Thread
    from radar_class import locate_pick
    import traceback
    _, K_0, C_0, E_0, imgsz = read_yaml(0)
    ra = Radar(K_0, C_0, E_0, imgsz=imgsz)
    Radar.start()
    cv2.namedWindow("out", cv2.WINDOW_NORMAL)  # shows the lidar depth map
    cv2.namedWindow("img", cv2.WINDOW_NORMAL)  # shows the actual image
    cap = Camera_Thread(0)
    try:
        flag, frame = cap.read()
        # Select an ROI in which to measure depth.
        cv2.imshow("img", frame)
        rect = cv2.selectROI("img", frame, False)
        _, rvec, tvec = locate_pick(cap, 0, 0)  # estimate pose from four manually picked points
        # Build the world-to-camera transform matrix, then invert it.
        T = np.eye(4)
        T[:3, :3] = cv2.Rodrigues(rvec)[0]
        T[:3, 3] = tvec.reshape(-1)
        T = np.linalg.inv(T)
        key = cv2.waitKey(1)
        while (flag and key != ord('q') & 0xFF):
            depth = ra.read()  # fetch the current depth map
            # Draw the ROI on both the camera image and the depth map for
            # side-by-side comparison.
            cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 0), 3)
            cv2.rectangle(depth, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), 255, 3)
            cv2.imshow("out", depth)
            cv2.imshow("img", frame)
            key = cv2.waitKey(1)
            if key == ord('r') & 0xFF:
                # Re-select the ROI.
                rect = cv2.selectROI("img", frame, False)
            if key == ord('s') & 0xFF:
                # Print world-frame and camera-frame coordinates plus the
                # distance, for a rough check of the range measurement.
                cp = ra.detect_depth([rect]).reshape(-1)
                cp = (T @ np.concatenate(
                    [np.concatenate([cp[:2], np.ones(1)], axis=0) * cp[2], np.ones(1)], axis=0))[:3]
                cp_eye = (np.eye(4) @ np.concatenate(
                    [np.concatenate([cp[:2], np.ones(1)], axis=0) * cp[2], np.ones(1)], axis=0))[:3]
                print(f"target position is ({cp[0]:0.3f},{cp[1]:0.3f},{cp[2]:0.3f}) and distance is {np.linalg.norm(cp):0.3f}")
                print(f"origin target position is ({cp_eye[0]:0.3f},{cp_eye[1]:0.3f},{cp_eye[2]:0.3f}) and distance is {np.linalg.norm(cp_eye):0.3f}")
            flag, frame = cap.read()
    except:
        traceback.print_exc()
    Radar.stop()
    cap.release()
    cv2.destroyAllWindows()
|
app.py | # -*- encoding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
import gevent.monkey
gevent.monkey.patch_all()
import argparse
import os
from functools import wraps
import time
import logging
import psutil
import six
import re
from celery import Celery
from threading import Thread
from prometheus_client import (
start_http_server,
Histogram,
Gauge,
Counter
)
#Configure loglevel with envs
loglevel = os.environ.get('LOGLEVEL', 'WARNING').upper()
logging.basicConfig(level=loglevel)
# Command-line interface of the exporter.
parser = argparse.ArgumentParser(description='Listens to Celery events and exports them to Prometheus')
parser.add_argument(
    '--host',
    dest='host',
    action='store',
    type=six.text_type,
    default='localhost',
    help='The hostname to listen on'
)
parser.add_argument(
    '--port', '-p',
    dest='port',
    action='store',
    type=int,
    default=9897,
    help='The port to listen on'
)
parser.add_argument(
    '--monitor-memory',
    dest='monitor_memory',
    action='store_true',
    default=False,
    help='whether to monitor memory (RSS) of celery workers'
)
parser.add_argument(
    '--broker',
    dest='broker',
    action='store',
    type=six.text_type,
    help='URL to Celery broker'
)
parser.add_argument(
    '--tz', dest='tz',
    help="Timezone used by the celery app."
)
# May be given multiple times; collected into a list (action='append').
parser.add_argument(
    '--queue',
    dest='queues',
    action='append',
    help='Celery queues to check length for'
)
# We have these counters because sometimes we might not
# be able to find out the queue time or runtime of a task,
# so we can't mark an observation in the histogram
task_submissions = Counter(
    'celery_task_submissions',
    'number of times a task has been submitted',
    ['task_name', 'exchange'],
)
task_completions = Counter(
    'celery_task_completions',
    'number of times a task has been completed',
    ['task_name', 'state', 'exchange'],
)
task_queuetime_seconds = Histogram(
    'celery_task_queuetime_seconds',
    'number of seconds spent in queue for celery tasks',
    ['task_name', 'exchange'],
    buckets=(
        .005, .01, .025, .05, .075, .1, .25, .5,
        .75, 1.0, 2.5, 5.0, 7.5, 10.0, 100.0, float('inf')
    )
)
task_runtime_seconds = Histogram(
    'celery_task_runtime_seconds',
    'number of seconds spent executing celery tasks',
    ['task_name', 'state', 'exchange'],
    buckets=(
        .005, .01, .025, .05, .075, .1, .25, .5,
        .75, 1.0, 2.5, 5.0, 7.5, 10.0, 100.0, float('inf')
    )
)
# Optional gauges, created lazily in setup_metrics() when enabled.
queue_length = None
queue_rss = None
def setup_metrics(app, monitor_memory=False, queues=None):
    """Create the optional gauges.

    The queue-length gauge is only created when queues were requested and
    the broker is Redis (lengths are read via LLEN); the RSS gauge is only
    created when memory monitoring was requested.
    """
    global queue_length, queue_rss
    wants_queue_lengths = bool(queues) and app.conf.BROKER_URL.startswith('redis:')
    if wants_queue_lengths:
        queue_length = Gauge(
            'celery_queue_length',
            'length of Celery queues',
            ['queue']
        )
    if monitor_memory:
        queue_rss = Gauge(
            'celery_queue_rss_megabytes',
            'RSS of celery queue',
            ['queue']
        )
def check_queue_lengths(app, queues):
    """Poll Redis every 45 seconds for the length of each queue and export
    it via the queue_length gauge (runs forever; meant for a thread)."""
    client = app.broker_connection().channel().client
    while True:
        logging.error('Setting queue lengths for %s' % queues)
        # Pipeline the LLEN calls so all lengths arrive in one round trip.
        pipe = client.pipeline(transaction=False)
        for queue in queues:
            pipe.llen(queue)
        for result, queue in zip(pipe.execute(), queues):
            logging.error('Setting q length: %s - %s' % (queue, result))
            queue_length.labels(queue).set(result)
        time.sleep(45)
def check_queue_rss():
    """Poll every 45 seconds for the RSS of each Celery worker MainProcess
    and export it via the queue_rss gauge (runs forever; meant for a thread).

    Workers are found by matching each process's command line against the
    '[celeryd: name@host:MainProcess]' title that Celery sets.
    """
    # Fix: use a raw string — '\[' and '\]' are invalid escape sequences in
    # a plain string literal (DeprecationWarning, later SyntaxWarning).
    # Also compile once instead of re-scanning the pattern on every process.
    pattern = re.compile(r'\[celeryd: (.*@.*):MainProcess\]')
    while True:
        output = []
        for proc in psutil.process_iter():
            cmd = proc.cmdline()
            if len(cmd):
                token = pattern.findall(cmd[0])
                if token:
                    output.append(
                        {
                            'name': token[0].split('@')[0],
                            'rss': proc.memory_info().rss / 1000000.0
                        }
                    )
        for item in output:
            queue_rss.labels(item['name']).set(item['rss'])
        time.sleep(45)
def celery_monitor(app):
    """Consume the Celery event stream forever, updating the Prometheus
    counters and histograms for task submissions, completions and runtimes.
    Reconnects with exponential backoff on errors."""
    state = app.events.State()

    def task_handler(fn):
        # Decorator: feed the event into the state machine, then pass the
        # resolved Task object (which may be None) to the handler.
        @wraps(fn)
        def wrapper(event):
            state.event(event)
            task = state.tasks.get(event['uuid'])
            logging.error('Received a task: %s %s' % (event, task))
            return fn(event, task)
        return wrapper

    @task_handler
    def handle_started_task(event, task):
        # Queue time is only observable when the submission time is known.
        if task is not None and task.sent is not None:
            queue_time = time.time() - task.sent
            task_queuetime_seconds.labels(task.name, task.exchange).observe(queue_time)
            task_submissions.labels(task.name, task.exchange).inc()
        else:
            task_submissions.labels('unknown', 'unknown').inc()

    @task_handler
    def handle_succeeded_task(event, task):
        if task is not None:
            logging.debug('Succeeded a task: %s %s %s', task.name, task.exchange, task.runtime)
            task_runtime_seconds.labels(task.name, 'succeeded', task.exchange).observe(task.runtime)
            task_completions.labels(task.name, 'succeeded', task.exchange).inc()
        else:
            logging.debug('Could not track a succeeded task')
            task_completions.labels('unknown', 'succeeded', 'unknown').inc()

    @task_handler
    def handle_failed_task(event, task):
        if task is not None:
            logging.debug('Failed a task: %s %s %s', task.name, task.exchange, task.runtime)
            task_runtime_seconds.labels(task.name, 'failed', task.exchange).observe(task.runtime)
            task_completions.labels(task.name, 'failed', task.exchange).inc()
        else:
            logging.debug('Could not track a failed task')
            task_completions.labels('unknown', 'failed', 'unknown').inc()

    @task_handler
    def handle_retried_task(event, task):
        if task is not None:
            logging.debug('Retried a task: %s %s %s', task.name, task.exchange, task.runtime)
            task_runtime_seconds.labels(task.name, 'retried', task.exchange).observe(task.runtime)
            task_completions.labels(task.name, 'retried', task.exchange).inc()
        else:
            logging.debug('Could not track a retried task')
            task_completions.labels('unknown', 'retried', 'unknown').inc()

    try_interval = 1
    while True:
        try:
            # Double the backoff up front; reset to 1 once connected.
            try_interval *= 2
            with app.connection() as connection:
                recv = app.events.Receiver(connection, handlers={
                    'task-started': handle_started_task,
                    'task-succeeded': handle_succeeded_task,
                    'task-failed': handle_failed_task,
                    'task-retried': handle_retried_task,
                    '*': state.event
                })
                try_interval = 1
                recv.capture(limit=None, timeout=None, wakeup=True)
        except (KeyboardInterrupt, SystemExit):
            # Propagate the interrupt to the main thread, Py2/Py3 compatible.
            try:
                import _thread as thread
            except ImportError:
                import thread
            thread.interrupt_main()
        except Exception as e:
            logging.error("Failed to capture events: '%s', "
                          "trying again in %s seconds.",
                          e, try_interval)
            logging.debug(e, exc_info=True)
            time.sleep(try_interval)
def run():
    """Parse CLI arguments, start the metrics HTTP server, spawn the
    optional pollers and monitor Celery events (blocks forever)."""
    args = parser.parse_args()
    if args.tz:
        # Make time.localtime() agree with the Celery app's timezone.
        os.environ['TZ'] = args.tz
        time.tzset()
    app = Celery(broker=args.broker)
    setup_metrics(app, monitor_memory=args.monitor_memory, queues=args.queues)
    start_http_server(args.port, args.host)
    # The pollers only start when their gauge was actually created.
    if queue_length is not None:
        Thread(target=check_queue_lengths, args=(app, args.queues)).start()
    if queue_rss is not None:
        Thread(target=check_queue_rss).start()
    celery_monitor(app)


if __name__ == '__main__':
    run()
|
index.py | import json
import os
import random
import requests
import socket
import time
from threading import Thread
# https://gist.github.com/butla/2d9a4c0f35ea47b7452156c96a4e7b12
def wait_for_port(port, host='localhost', timeout=5.0):
    """Wait until a port starts accepting TCP connections.

    Args:
        port (int): Port number.
        host (str): Host address on which the port should exist.
        timeout (float): In seconds. How long to wait before raising errors.

    Raises:
        TimeoutError: The port isn't accepting connections after time
            specified in `timeout`.
    """
    deadline = time.perf_counter() + timeout
    while True:
        try:
            conn = socket.create_connection((host, port), timeout=timeout)
        except OSError as ex:
            # Not up yet: back off briefly, then give up past the deadline.
            time.sleep(0.01)
            if time.perf_counter() >= deadline:
                raise TimeoutError('Waited too long for the port {} '
                                   'on host {} to start accepting connections.'
                                   .format(port, host)) from ex
        else:
            conn.close()
            return
def mk_rpc(proto='http', host=None, port=None):
    """Build (rpc, rpc_callbacks) helpers bound to the Reach RPC server.

    :param proto: URL scheme for the RPC server.
    :param host: RPC server host; defaults to $REACH_RPC_SERVER.
    :param port: RPC server port; defaults to $REACH_RPC_PORT.
    :return: (rpc, rpc_callbacks) — blocks until the port accepts
        connections before returning.
    """
    # Fix: the original read os.environ[...] in the default parameter
    # values, which is evaluated at import time and raises KeyError when
    # the variables are unset even if mk_rpc is never called. Resolve the
    # environment defaults at call time instead.
    if host is None:
        host = os.environ['REACH_RPC_SERVER']
    if port is None:
        port = os.environ['REACH_RPC_PORT']

    def rpc(m, *args):
        # POST *args as JSON to method m and return the decoded answer.
        lab = 'RPC %s %s' % (m, json.dumps([*args]))
        print(lab)
        ans = requests.post('%s://%s:%s%s' % (proto, host, port, m),
                            json=[*args])
        print('%s ==> %s' % (lab, json.dumps(ans.json())))
        return ans.json()

    def rpc_callbacks(m, arg, cbacks):
        # Invoke method m, servicing 'Kont' callback requests from cbacks
        # until the server reports 'Done'.
        vals = {k: v for k, v in cbacks.items() if not callable(v)}
        meths = {k: True for k, v in cbacks.items() if callable(v)}
        p = rpc(m, arg, vals, meths)
        while True:
            if p['t'] == 'Done':
                return p
            elif p['t'] == 'Kont':
                cback = cbacks[p['m']]
                ans = cback(*p['args'])
                p = rpc('/kont', p['kid'], ans)
            else:
                raise Exception('Illegal callback return: %s' % json.dumps(p))

    wait_for_port(port, host)
    return rpc, rpc_callbacks
def main():
    """Play one rock-paper-scissors game between two freshly funded test
    accounts over the Reach RPC server and print the balance changes."""
    print('I am the client')
    rpc, rpc_callbacks = mk_rpc()
    starting_balance = rpc('/stdlib/parseCurrency', 10)
    acc_alice = rpc('/stdlib/newTestAccount', starting_balance)
    acc_bob = rpc('/stdlib/newTestAccount', starting_balance)

    def fmt(x):
        # Format an atomic-unit amount with 4 decimal places.
        return rpc('/stdlib/formatCurrency', x, 4)

    def get_balance(w):
        return fmt(rpc('/stdlib/balanceOf', w))

    before_alice = get_balance(acc_alice)
    before_bob = get_balance(acc_bob)
    # Alice deploys the contract; Bob attaches to it by its info.
    ctc_alice = rpc('/acc/deploy', acc_alice)
    ctc_bob = rpc('/acc/attach', acc_bob, rpc('/ctc/getInfo', ctc_alice))
    HAND = ['Rock', 'Paper', 'Scissors']
    OUTCOME = ['Bob wins', 'Draw', 'Alice wins']

    def player(who):
        # Interact callbacks shared by both participants.
        def getHand():
            hand = random.randint(0, 2)
            print('%s played %s' % (who, HAND[hand]))
            return hand

        def informTimeout():
            print('%s observed a timeout' % who)

        def seeOutcome(n):
            print('%s saw outcome %s'
                  % (who, OUTCOME[rpc('/stdlib/bigNumberToNumber', n)]))

        return {'stdlib.hasRandom': True,
                'getHand': getHand,
                'informTimeout': informTimeout,
                'seeOutcome': seeOutcome,
                }

    def play_alice():
        rpc_callbacks(
            '/backend/Alice',
            ctc_alice,
            dict(wager=rpc('/stdlib/parseCurrency', 5), **player('Alice')))

    def play_bob():
        def acceptWager(amt):
            print('Bob accepts the wager of %s' % fmt(amt))
        rpc_callbacks(
            '/backend/Bob',
            ctc_bob,
            dict(acceptWager=acceptWager, **player('Bob')))

    # Both participants must run concurrently for the contract to progress.
    alice = Thread(target=play_alice)
    bob = Thread(target=play_bob)
    alice.start()
    bob.start()
    alice.join()
    bob.join()
    after_alice = get_balance(acc_alice)
    after_bob = get_balance(acc_bob)
    print('Alice went from %s to %s' % (before_alice, after_alice))
    print(' Bob went from %s to %s' % (before_bob, after_bob))
    rpc('/stop')


if __name__ == '__main__':
    main()
|
tests.py | """
Unit tests for reverse URL lookups.
"""
import sys
import threading
from admin_scripts.tests import AdminScriptTestCase
from django.conf import settings
from django.conf.urls import include, url
from django.contrib.auth.models import User
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.http import (
HttpRequest, HttpResponsePermanentRedirect, HttpResponseRedirect,
)
from django.shortcuts import redirect
from django.test import SimpleTestCase, TestCase, override_settings
from django.test.utils import override_script_prefix
from django.urls import (
NoReverseMatch, RegexURLPattern, RegexURLResolver, Resolver404,
ResolverMatch, get_callable, get_resolver, resolve, reverse, reverse_lazy,
)
from . import middleware, urlconf_outer, views
from .utils import URLObject
from .views import empty_view
# Table driving the resolve() tests: each row is one URL plus the metadata
# expected on the ResolverMatch it produces.
resolve_test_data = (
    # These entries are in the format: (path, url_name, app_name, namespace, view_name, func, args, kwargs)
    # Simple case
    ('/normal/42/37/', 'normal-view', '', '', 'normal-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}),
    (
        '/view_class/42/37/', 'view-class', '', '', 'view-class', views.view_class_instance, tuple(),
        {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-normal-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/view_class/42/37/', 'inc-view-class', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-view-class',
        views.view_class_instance, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    # Unnamed args are dropped if you have *any* kwargs in a pattern
    ('/mixed_args/42/37/', 'mixed-args', '', '', 'mixed-args', views.empty_view, tuple(), {'arg2': '37'}),
    (
        '/included/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
        views.empty_view, tuple(), {'arg2': '37'}
    ),
    (
        '/included/12/mixed_args/42/37/', 'inc-mixed-args', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-mixed-args',
        views.empty_view, tuple(), {'arg2': '37'}
    ),
    # Unnamed views should have None as the url_name. Regression data for #21157.
    (
        '/unnamed/normal/42/37/', None, '', '', 'urlpatterns_reverse.views.empty_view', views.empty_view, tuple(),
        {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/unnamed/view_class/42/37/', None, '', '', 'urlpatterns_reverse.views.ViewClass', views.view_class_instance,
        tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    # If you have no kwargs, you get an args list.
    ('/no_kwargs/42/37/', 'no-kwargs', '', '', 'no-kwargs', views.empty_view, ('42', '37'), {}),
    (
        '/included/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
        views.empty_view, ('42', '37'), {}
    ),
    (
        '/included/12/no_kwargs/42/37/', 'inc-no-kwargs', 'included_namespace_urls',
        'included_namespace_urls', 'included_namespace_urls:inc-no-kwargs',
        views.empty_view, ('12', '42', '37'), {}
    ),
    # Namespaces
    (
        '/test1/inner/42/37/', 'urlobject-view', 'testapp', 'test-ns1', 'test-ns1:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/ns-included1/normal/42/37/', 'inc-normal-view', 'included_namespace_urls',
        'inc-ns1', 'inc-ns1:inc-normal-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    # NOTE(review): this row duplicates the '/included/test3/inner/42/37/'
    # entry above verbatim — presumably unintentional; confirm before removing.
    (
        '/included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'included_namespace_urls:test-ns3', 'included_namespace_urls:test-ns3:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/default/inner/42/37/', 'urlobject-view', 'testapp', 'testapp', 'testapp:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/other2/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns2', 'other-ns2:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/other1/inner/42/37/', 'urlobject-view', 'nodefault', 'other-ns1', 'other-ns1:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    # Nested namespaces
    (
        '/ns-included1/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp',
        'inc-ns1:test-ns3', 'inc-ns1:test-ns3:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/ns-included1/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
        'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
        'inc-ns1:inc-ns4:inc-ns2:test-ns3',
        'inc-ns1:inc-ns4:inc-ns2:test-ns3:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/app-included/test3/inner/42/37/', 'urlobject-view', 'included_namespace_urls:testapp', 'inc-app:test-ns3',
        'inc-app:test-ns3:urlobject-view', views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    (
        '/app-included/ns-included4/ns-included2/test3/inner/42/37/', 'urlobject-view',
        'included_namespace_urls:namespace_urls:included_namespace_urls:testapp',
        'inc-app:inc-ns4:inc-ns2:test-ns3',
        'inc-app:inc-ns4:inc-ns2:test-ns3:urlobject-view',
        views.empty_view, tuple(), {'arg1': '42', 'arg2': '37'}
    ),
    # Namespaces capturing variables
    (
        '/inc70/', 'inner-nothing', 'included_urls', 'inc-ns5', 'inc-ns5:inner-nothing',
        views.empty_view, tuple(), {'outer': '70'}
    ),
    (
        '/inc78/extra/foobar/', 'inner-extra', 'included_urls', 'inc-ns5', 'inc-ns5:inner-extra',
        views.empty_view, tuple(), {'outer': '78', 'extra': 'foobar'}
    ),
)
# Table driving the reverse() tests: each row is
# (url_name, expected, args, kwargs), where `expected` is either the reversed
# path or the NoReverseMatch class when the combination must fail to reverse.
test_data = (
    ('places', '/places/3/', [3], {}),
    ('places', '/places/3/', ['3'], {}),
    ('places', NoReverseMatch, ['a'], {}),
    ('places', NoReverseMatch, [], {}),
    ('places?', '/place/', [], {}),
    ('places+', '/places/', [], {}),
    ('places*', '/place/', [], {}),
    ('places2?', '/', [], {}),
    ('places2+', '/places/', [], {}),
    ('places2*', '/', [], {}),
    ('places3', '/places/4/', [4], {}),
    ('places3', '/places/harlem/', ['harlem'], {}),
    ('places3', NoReverseMatch, ['harlem64'], {}),
    ('places4', '/places/3/', [], {'id': 3}),
    ('people', NoReverseMatch, [], {}),
    ('people', '/people/adrian/', ['adrian'], {}),
    ('people', '/people/adrian/', [], {'name': 'adrian'}),
    ('people', NoReverseMatch, ['name with spaces'], {}),
    ('people', NoReverseMatch, [], {'name': 'name with spaces'}),
    ('people2', '/people/name/', [], {}),
    ('people2a', '/people/name/fred/', ['fred'], {}),
    ('people_backref', '/people/nate-nate/', ['nate'], {}),
    ('people_backref', '/people/nate-nate/', [], {'name': 'nate'}),
    ('optional', '/optional/fred/', [], {'name': 'fred'}),
    ('optional', '/optional/fred/', ['fred'], {}),
    ('named_optional', '/optional/1/', [1], {}),
    ('named_optional', '/optional/1/', [], {'arg1': 1}),
    ('named_optional', '/optional/1/2/', [1, 2], {}),
    ('named_optional', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
    ('named_optional_terminated', '/optional/1/2/', [1, 2], {}),
    ('named_optional_terminated', '/optional/1/2/', [], {'arg1': 1, 'arg2': 2}),
    ('hardcoded', '/hardcoded/', [], {}),
    ('hardcoded2', '/hardcoded/doc.pdf', [], {}),
    ('people3', '/people/il/adrian/', [], {'state': 'il', 'name': 'adrian'}),
    ('people3', NoReverseMatch, [], {'state': 'il'}),
    ('people3', NoReverseMatch, [], {'name': 'adrian'}),
    ('people4', NoReverseMatch, [], {'state': 'il', 'name': 'adrian'}),
    ('people6', '/people/il/test/adrian/', ['il/test', 'adrian'], {}),
    ('people6', '/people//adrian/', ['adrian'], {}),
    ('range', '/character_set/a/', [], {}),
    ('range2', '/character_set/x/', [], {}),
    ('price', '/price/$10/', ['10'], {}),
    ('price2', '/price/$10/', ['10'], {}),
    ('price3', '/price/$10/', ['10'], {}),
    ('product', '/product/chocolate+($2.00)/', [], {'price': '2.00', 'product': 'chocolate'}),
    ('headlines', '/headlines/2007.5.21/', [], dict(year=2007, month=5, day=21)),
    (
        'windows', r'/windows_path/C:%5CDocuments%20and%20Settings%5Cspam/', [],
        dict(drive_name='C', path=r'Documents and Settings\spam')
    ),
    ('special', r'/special_chars/~@+%5C$*%7C/', [r'~@+\$*|'], {}),
    ('special', r'/special_chars/some%20resource/', [r'some resource'], {}),
    ('special', r'/special_chars/10%25%20complete/', [r'10% complete'], {}),
    ('special', r'/special_chars/some%20resource/', [], {'chars': r'some resource'}),
    ('special', r'/special_chars/10%25%20complete/', [], {'chars': r'10% complete'}),
    ('special', NoReverseMatch, [''], {}),
    ('mixed', '/john/0/', [], {'name': 'john'}),
    ('repeats', '/repeats/a/', [], {}),
    ('repeats2', '/repeats/aa/', [], {}),
    ('repeats3', '/repeats/aa/', [], {}),
    ('test', '/test/1', [], {}),
    ('inner-nothing', '/outer/42/', [], {'outer': '42'}),
    ('inner-nothing', '/outer/42/', ['42'], {}),
    ('inner-nothing', NoReverseMatch, ['foo'], {}),
    ('inner-extra', '/outer/42/extra/inner/', [], {'extra': 'inner', 'outer': '42'}),
    ('inner-extra', '/outer/42/extra/inner/', ['42', 'inner'], {}),
    ('inner-extra', NoReverseMatch, ['fred', 'inner'], {}),
    ('inner-no-kwargs', '/outer-no-kwargs/42/inner-no-kwargs/1/', ['42', '1'], {}),
    ('disjunction', NoReverseMatch, ['foo'], {}),
    ('inner-disjunction', NoReverseMatch, ['10', '11'], {}),
    ('extra-places', '/e-places/10/', ['10'], {}),
    ('extra-people', '/e-people/fred/', ['fred'], {}),
    ('extra-people', '/e-people/fred/', [], {'name': 'fred'}),
    ('part', '/part/one/', [], {'value': 'one'}),
    ('part', '/prefix/xx/part/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/part2/one/', [], {'value': 'one'}),
    ('part2', '/part2/', [], {}),
    ('part2', '/prefix/xx/part2/one/', [], {'value': 'one', 'prefix': 'xx'}),
    ('part2', '/prefix/xx/part2/', [], {'prefix': 'xx'}),
    # Tests for nested groups. Nested capturing groups will only work if you
    # *only* supply the correct outer group.
    ('nested-noncapture', '/nested/noncapture/opt', [], {'p': 'opt'}),
    ('nested-capture', '/nested/capture/opt/', ['opt/'], {}),
    ('nested-capture', NoReverseMatch, [], {'p': 'opt'}),
    ('nested-mixedcapture', '/nested/capture/mixed/opt', ['opt'], {}),
    ('nested-mixedcapture', NoReverseMatch, [], {'p': 'opt'}),
    ('nested-namedcapture', '/nested/capture/named/opt/', [], {'outer': 'opt/'}),
    ('nested-namedcapture', NoReverseMatch, [], {'outer': 'opt/', 'inner': 'opt'}),
    ('nested-namedcapture', NoReverseMatch, [], {'inner': 'opt'}),
    ('non_path_include', '/includes/non_path_include/', [], {}),
    # Tests for #13154
    ('defaults', '/defaults_view1/3/', [], {'arg1': 3, 'arg2': 1}),
    ('defaults', '/defaults_view2/3/', [], {'arg1': 3, 'arg2': 2}),
    ('defaults', NoReverseMatch, [], {'arg1': 3, 'arg2': 3}),
    ('defaults', NoReverseMatch, [], {'arg2': 1}),
    # Security tests
    ('security', '/%2Fexample.com/security/', ['/example.com'], {}),
)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.no_urls')
class NoURLPatternsTests(SimpleTestCase):
    def test_no_urls_exception(self):
        """Accessing url_patterns on an empty URLconf raises ImproperlyConfigured."""
        expected_msg = (
            "The included URLconf 'urlpatterns_reverse.no_urls' does not "
            "appear to have any patterns in it. If you see valid patterns in "
            "the file then the issue is probably caused by a circular import."
        )
        resolver = RegexURLResolver(r'^$', settings.ROOT_URLCONF)
        with self.assertRaisesMessage(ImproperlyConfigured, expected_msg):
            # Attribute access alone must trigger the error.
            getattr(resolver, 'url_patterns')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class URLPatternReverse(SimpleTestCase):
    # Data-driven checks of reverse() plus its error-message formats.

    def test_urlpattern_reverse(self):
        """Run every row of the module-level test_data table through reverse()."""
        for name, expected, args, kwargs in test_data:
            try:
                got = reverse(name, args=args, kwargs=kwargs)
            except NoReverseMatch:
                # Rows whose `expected` is the NoReverseMatch class itself
                # mark combinations that must fail to reverse.
                self.assertEqual(expected, NoReverseMatch)
            else:
                self.assertEqual(got, expected)

    def test_reverse_none(self):
        # Reversing None should raise an error, not return the last un-named view.
        with self.assertRaises(NoReverseMatch):
            reverse(None)

    @override_script_prefix('/{{invalid}}/')
    def test_prefix_braces(self):
        """Braces in the script prefix are percent-encoded in reversed URLs."""
        self.assertEqual(
            '/%7B%7Binvalid%7D%7D/includes/non_path_include/',
            reverse('non_path_include')
        )

    def test_prefix_parenthesis(self):
        # Parentheses are allowed and should not cause errors or be escaped
        with override_script_prefix('/bogus)/'):
            self.assertEqual(
                '/bogus)/includes/non_path_include/',
                reverse('non_path_include')
            )
        with override_script_prefix('/(bogus)/'):
            self.assertEqual(
                '/(bogus)/includes/non_path_include/',
                reverse('non_path_include')
            )

    @override_script_prefix('/bump%20map/')
    def test_prefix_format_char(self):
        """A literal '%' already in the script prefix is itself escaped (%25)."""
        self.assertEqual(
            '/bump%2520map/includes/non_path_include/',
            reverse('non_path_include')
        )

    @override_script_prefix('/%7Eme/')
    def test_non_urlsafe_prefix_with_args(self):
        # Regression for #20022, adjusted for #24013 because ~ is an unreserved
        # character. Tests whether % is escaped.
        self.assertEqual('/%257Eme/places/1/', reverse('places', args=[1]))

    def test_patterns_reported(self):
        # Regression for #17076
        with self.assertRaisesMessage(NoReverseMatch, r"1 pattern(s) tried: ['people/(?P<name>\\w+)/$']"):
            # this url exists, but requires an argument
            reverse("people", args=[])

    @override_script_prefix('/script:name/')
    def test_script_name_escaping(self):
        """Colons in the prefix and in the argument pass through unescaped."""
        self.assertEqual(
            reverse('optional', args=['foo:bar']),
            '/script:name/optional/foo:bar/'
        )

    def test_view_not_found_message(self):
        """Unknown view names produce a descriptive NoReverseMatch message."""
        msg = (
            "Reverse for 'non-existent-view' not found. 'non-existent-view' "
            "is not a valid view function or pattern name."
        )
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('non-existent-view')

    def test_no_args_message(self):
        msg = "Reverse for 'places' with no arguments not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places')

    def test_illegal_args_message(self):
        msg = "Reverse for 'places' with arguments '(1, 2)' not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places', args=(1, 2))

    def test_illegal_kwargs_message(self):
        msg = "Reverse for 'places' with keyword arguments '{'arg1': 2}' not found. 1 pattern(s) tried:"
        with self.assertRaisesMessage(NoReverseMatch, msg):
            reverse('places', kwargs={'arg1': 2})
class ResolverTests(SimpleTestCase):
    # Tests of RegexURLResolver behavior: repr, reverse, resolve failures,
    # callback detection, and concurrent population.

    def test_resolver_repr(self):
        """
        Test repr of RegexURLResolver, especially when urlconf_name is a list
        (#17892).
        """
        # Pick a resolver from a namespaced URLconf
        resolver = get_resolver('urlpatterns_reverse.namespace_urls')
        sub_resolver = resolver.namespace_dict['test-ns1'][1]
        self.assertIn('<RegexURLPattern list>', repr(sub_resolver))

    def test_reverse_lazy_object_coercion_by_resolve(self):
        """
        Verifies lazy object returned by reverse_lazy is coerced to
        text by resolve(). Previous to #21043, this would raise a TypeError.
        """
        urls = 'urlpatterns_reverse.named_urls'
        proxy_url = reverse_lazy('named-url1', urlconf=urls)
        resolver = get_resolver(urls)
        resolver.resolve(proxy_url)

    def test_resolver_reverse(self):
        """Resolver.reverse() works with no args, positional args, and kwargs."""
        resolver = get_resolver('urlpatterns_reverse.named_urls')
        self.assertEqual(resolver.reverse('named-url1'), '')
        self.assertEqual(resolver.reverse('named-url2', 'arg'), 'extra/arg/')
        self.assertEqual(resolver.reverse('named-url2', extra='arg'), 'extra/arg/')

    def test_non_regex(self):
        """
        A Resolver404 is raised if resolving doesn't meet the basic
        requirements of a path to match - i.e., at the very least, it matches
        the root pattern '^/'. Never return None from resolve() to prevent a
        TypeError from occurring later (#10834).
        """
        with self.assertRaises(Resolver404):
            resolve('')
        with self.assertRaises(Resolver404):
            resolve('a')
        with self.assertRaises(Resolver404):
            resolve('\\')
        with self.assertRaises(Resolver404):
            resolve('.')

    def test_404_tried_urls_have_names(self):
        """
        The list of URLs that come back from a Resolver404 exception contains
        a list in the right format for printing out in the DEBUG 404 page with
        both the patterns and URL names, if available.
        """
        urls = 'urlpatterns_reverse.named_urls'
        # this list matches the expected URL types and names returned when
        # you try to resolve a non-existent URL in the first level of included
        # URLs in named_urls.py (e.g., '/included/non-existent-url')
        url_types_names = [
            [{'type': RegexURLPattern, 'name': 'named-url1'}],
            [{'type': RegexURLPattern, 'name': 'named-url2'}],
            [{'type': RegexURLPattern, 'name': None}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url3'}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': 'named-url4'}],
            [{'type': RegexURLResolver}, {'type': RegexURLPattern, 'name': None}],
            [{'type': RegexURLResolver}, {'type': RegexURLResolver}],
        ]
        with self.assertRaisesMessage(Resolver404, 'tried') as cm:
            resolve('/included/non-existent-url', urlconf=urls)
        e = cm.exception
        # make sure we at least matched the root ('/') url resolver:
        self.assertIn('tried', e.args[0])
        self.assertEqual(
            len(e.args[0]['tried']),
            len(url_types_names),
            'Wrong number of tried URLs returned. Expected %s, got %s.' % (
                len(url_types_names), len(e.args[0]['tried'])
            )
        )
        # Loop variable renamed from `e` to `expected_entry` so the captured
        # Resolver404 exception above is no longer shadowed.
        for tried, expected in zip(e.args[0]['tried'], url_types_names):
            for t, expected_entry in zip(tried, expected):
                # Fixed: the failure message previously sat *outside* the
                # assertIsInstance() call (`...), 'msg' % ...` formed a
                # discarded tuple), so it was never shown. Pass it as the
                # msg argument instead.
                self.assertIsInstance(
                    t, expected_entry['type'],
                    '%s is not an instance of %s' % (t, expected_entry['type'])
                )
                if 'name' in expected_entry:
                    if not expected_entry['name']:
                        self.assertIsNone(t.name, 'Expected no URL name but found %s.' % t.name)
                    else:
                        self.assertEqual(
                            t.name,
                            expected_entry['name'],
                            'Wrong URL name. Expected "%s", got "%s".' % (expected_entry['name'], t.name)
                        )

    def test_namespaced_view_detail(self):
        """_is_callback() recognizes dotted paths of views present in the URLconf."""
        resolver = get_resolver('urlpatterns_reverse.nested_urls')
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view1'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.view2'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.nested_urls.View3'))
        self.assertFalse(resolver._is_callback('urlpatterns_reverse.nested_urls.blub'))

    def test_view_detail_as_method(self):
        # Views which have a class name as part of their path.
        resolver = get_resolver('urlpatterns_reverse.method_view_urls')
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.method_view'))
        self.assertTrue(resolver._is_callback('urlpatterns_reverse.method_view_urls.ViewContainer.classmethod_view'))

    def test_populate_concurrency(self):
        """
        RegexURLResolver._populate() can be called concurrently, but not more
        than once per thread (#26888).
        """
        resolver = RegexURLResolver(r'^/', 'urlpatterns_reverse.urls')
        # Mark the main thread as already populating; the worker thread must
        # still be able to populate independently.
        resolver._local.populating = True
        thread = threading.Thread(target=resolver._populate)
        thread.start()
        thread.join()
        self.assertNotEqual(resolver._reverse_dict, {})
@override_settings(ROOT_URLCONF='urlpatterns_reverse.reverse_lazy_urls')
class ReverseLazyTest(TestCase):
    def test_redirect_with_lazy_reverse(self):
        """A view that redirects via a lazily-reversed URL works end-to-end."""
        response = self.client.get('/redirect/')
        self.assertRedirects(response, "/redirected_to/", status_code=302)

    def test_user_permission_with_lazy_reverse(self):
        """A lazy LOGIN_URL redirects anonymous users and admits logged-in ones."""
        user = User.objects.create_user('alfred', 'alfred@example.com', password='testpw')
        anonymous_response = self.client.get('/login_required_view/')
        self.assertRedirects(anonymous_response, "/login/?next=/login_required_view/", status_code=302)
        self.client.force_login(user)
        authenticated_response = self.client.get('/login_required_view/')
        self.assertEqual(authenticated_response.status_code, 200)

    def test_inserting_reverse_lazy_into_string(self):
        """A reverse_lazy() proxy can be %-interpolated into a plain string."""
        self.assertEqual(
            'Some URL: %s' % reverse_lazy('some-login-page'),
            'Some URL: /login/'
        )
class ReverseLazySettingsTest(AdminScriptTestCase):
    """
    reverse_lazy can be used in settings without causing a circular
    import error.
    """
    def setUp(self):
        # The `extra` text is written verbatim into a generated settings
        # module; it is runtime data, not code in this file.
        self.write_settings('settings.py', extra="""
from django.urls import reverse_lazy
LOGIN_URL = reverse_lazy('login')""")

    def tearDown(self):
        self.remove_settings('settings.py')

    def test_lazy_in_settings(self):
        # `manage.py check` imports the generated settings; any circular
        # import would surface as output on stderr.
        out, err = self.run_manage(['check'])
        self.assertNoOutput(err)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class ReverseShortcutTests(SimpleTestCase):
    # Tests of the redirect() shortcut's three input forms:
    # model-like objects, view names, and raw URLs.

    def test_redirect_to_object(self):
        # We don't really need a model; just something with a get_absolute_url
        class FakeObj:
            def get_absolute_url(self):
                return "/hi-there/"
        res = redirect(FakeObj())
        self.assertIsInstance(res, HttpResponseRedirect)
        self.assertEqual(res.url, '/hi-there/')
        # permanent=True switches the response class to a 301.
        res = redirect(FakeObj(), permanent=True)
        self.assertIsInstance(res, HttpResponsePermanentRedirect)
        self.assertEqual(res.url, '/hi-there/')

    def test_redirect_to_view_name(self):
        """redirect() accepts a view name plus args/kwargs, like reverse()."""
        res = redirect('hardcoded2')
        self.assertEqual(res.url, '/hardcoded/doc.pdf')
        res = redirect('places', 1)
        self.assertEqual(res.url, '/places/1/')
        res = redirect('headlines', year='2008', month='02', day='17')
        self.assertEqual(res.url, '/headlines/2008.02.17/')
        with self.assertRaises(NoReverseMatch):
            redirect('not-a-view')

    def test_redirect_to_url(self):
        """redirect() passes raw URLs through, percent-encoding non-ASCII."""
        res = redirect('/foo/')
        self.assertEqual(res.url, '/foo/')
        res = redirect('http://example.com/')
        self.assertEqual(res.url, 'http://example.com/')
        # Assert that we can redirect using UTF-8 strings
        res = redirect('/æøå/abc/')
        self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5/abc/')
        # Assert that no imports are attempted when dealing with a relative path
        # (previously, the below would resolve in a UnicodeEncodeError from __import__ )
        res = redirect('/æøå.abc/')
        self.assertEqual(res.url, '/%C3%A6%C3%B8%C3%A5.abc/')
        res = redirect('os.path')
        self.assertEqual(res.url, 'os.path')

    def test_no_illegal_imports(self):
        # modules that are not listed in urlpatterns should not be importable
        redirect("urlpatterns_reverse.nonimported_module.view")
        self.assertNotIn("urlpatterns_reverse.nonimported_module", sys.modules)

    def test_reverse_by_path_nested(self):
        # Views added to urlpatterns using include() should be reversible.
        from .views import nested_view
        self.assertEqual(reverse(nested_view), '/includes/nested_path/')

    def test_redirect_view_object(self):
        """redirect() accepts a view callable directly."""
        from .views import absolute_kwargs_view
        res = redirect(absolute_kwargs_view)
        self.assertEqual(res.url, '/absolute_arg_view/')
        with self.assertRaises(NoReverseMatch):
            redirect(absolute_kwargs_view, wrong_argument=None)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class NamespaceTests(SimpleTestCase):
    # Exhaustive reverse() coverage for URL namespaces: ambiguity, nesting,
    # app namespaces, current_app resolution, and variable-capturing prefixes.
    # '+%5C$*' sequences below are the percent-encoded form of the literal
    # special-characters path segment used throughout these URLconfs.

    def test_ambiguous_object(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        with self.assertRaises(NoReverseMatch):
            reverse('urlobject-view')
        with self.assertRaises(NoReverseMatch):
            reverse('urlobject-view', args=[37, 42])
        with self.assertRaises(NoReverseMatch):
            reverse('urlobject-view', kwargs={'arg1': 42, 'arg2': 37})

    def test_ambiguous_urlpattern(self):
        "Names deployed via dynamic URL objects that require namespaces can't be resolved"
        with self.assertRaises(NoReverseMatch):
            reverse('inner-nothing')
        with self.assertRaises(NoReverseMatch):
            reverse('inner-nothing', args=[37, 42])
        with self.assertRaises(NoReverseMatch):
            reverse('inner-nothing', kwargs={'arg1': 42, 'arg2': 37})

    def test_non_existent_namespace(self):
        "Non-existent namespaces raise errors"
        with self.assertRaises(NoReverseMatch):
            reverse('blahblah:urlobject-view')
        with self.assertRaises(NoReverseMatch):
            reverse('test-ns1:blahblah:urlobject-view')

    def test_normal_name(self):
        "Normal lookups work as expected"
        self.assertEqual('/normal/', reverse('normal-view'))
        self.assertEqual('/normal/37/42/', reverse('normal-view', args=[37, 42]))
        self.assertEqual('/normal/42/37/', reverse('normal-view', kwargs={'arg1': 42, 'arg2': 37}))
        self.assertEqual('/+%5C$*/', reverse('special-view'))

    def test_simple_included_name(self):
        "Normal lookups work on names included from other patterns"
        self.assertEqual('/included/normal/', reverse('included_namespace_urls:inc-normal-view'))
        self.assertEqual('/included/normal/37/42/', reverse('included_namespace_urls:inc-normal-view', args=[37, 42]))
        self.assertEqual(
            '/included/normal/42/37/',
            reverse('included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/included/+%5C$*/', reverse('included_namespace_urls:inc-special-view'))

    def test_namespace_object(self):
        "Dynamic URL objects can be found using a namespace"
        self.assertEqual('/test1/inner/', reverse('test-ns1:urlobject-view'))
        self.assertEqual('/test1/inner/37/42/', reverse('test-ns1:urlobject-view', args=[37, 42]))
        self.assertEqual('/test1/inner/42/37/', reverse('test-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
        self.assertEqual('/test1/inner/+%5C$*/', reverse('test-ns1:urlobject-special-view'))

    def test_app_object(self):
        "Dynamic URL objects can return a (pattern, app_name) 2-tuple, and include() can set the namespace"
        self.assertEqual('/newapp1/inner/', reverse('new-ns1:urlobject-view'))
        self.assertEqual('/newapp1/inner/37/42/', reverse('new-ns1:urlobject-view', args=[37, 42]))
        self.assertEqual('/newapp1/inner/42/37/', reverse('new-ns1:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
        self.assertEqual('/newapp1/inner/+%5C$*/', reverse('new-ns1:urlobject-special-view'))

    def test_app_object_default_namespace(self):
        "Namespace defaults to app_name when including a (pattern, app_name) 2-tuple"
        self.assertEqual('/new-default/inner/', reverse('newapp:urlobject-view'))
        self.assertEqual('/new-default/inner/37/42/', reverse('newapp:urlobject-view', args=[37, 42]))
        self.assertEqual(
            '/new-default/inner/42/37/', reverse('newapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/new-default/inner/+%5C$*/', reverse('newapp:urlobject-special-view'))

    def test_embedded_namespace_object(self):
        "Namespaces can be installed anywhere in the URL pattern tree"
        self.assertEqual('/included/test3/inner/', reverse('included_namespace_urls:test-ns3:urlobject-view'))
        self.assertEqual(
            '/included/test3/inner/37/42/', reverse('included_namespace_urls:test-ns3:urlobject-view', args=[37, 42])
        )
        self.assertEqual(
            '/included/test3/inner/42/37/',
            reverse('included_namespace_urls:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual(
            '/included/test3/inner/+%5C$*/', reverse('included_namespace_urls:test-ns3:urlobject-special-view')
        )

    def test_namespace_pattern(self):
        "Namespaces can be applied to include()'d urlpatterns"
        self.assertEqual('/ns-included1/normal/', reverse('inc-ns1:inc-normal-view'))
        self.assertEqual('/ns-included1/normal/37/42/', reverse('inc-ns1:inc-normal-view', args=[37, 42]))
        self.assertEqual(
            '/ns-included1/normal/42/37/', reverse('inc-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/ns-included1/+%5C$*/', reverse('inc-ns1:inc-special-view'))

    def test_app_name_pattern(self):
        "Namespaces can be applied to include()'d urlpatterns that set an app_name attribute"
        self.assertEqual('/app-included1/normal/', reverse('app-ns1:inc-normal-view'))
        self.assertEqual('/app-included1/normal/37/42/', reverse('app-ns1:inc-normal-view', args=[37, 42]))
        self.assertEqual(
            '/app-included1/normal/42/37/', reverse('app-ns1:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/app-included1/+%5C$*/', reverse('app-ns1:inc-special-view'))

    def test_namespace_pattern_with_variable_prefix(self):
        "When using an include with namespaces when there is a regex variable in front of it"
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', kwargs={'outer': 42}))
        self.assertEqual('/ns-outer/42/normal/', reverse('inc-outer:inc-normal-view', args=[42]))
        self.assertEqual(
            '/ns-outer/42/normal/37/4/',
            reverse('inc-outer:inc-normal-view', kwargs={'outer': 42, 'arg1': 37, 'arg2': 4})
        )
        self.assertEqual('/ns-outer/42/normal/37/4/', reverse('inc-outer:inc-normal-view', args=[42, 37, 4]))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', kwargs={'outer': 42}))
        self.assertEqual('/ns-outer/42/+%5C$*/', reverse('inc-outer:inc-special-view', args=[42]))

    def test_multiple_namespace_pattern(self):
        "Namespaces can be embedded"
        self.assertEqual('/ns-included1/test3/inner/', reverse('inc-ns1:test-ns3:urlobject-view'))
        self.assertEqual('/ns-included1/test3/inner/37/42/', reverse('inc-ns1:test-ns3:urlobject-view', args=[37, 42]))
        self.assertEqual(
            '/ns-included1/test3/inner/42/37/',
            reverse('inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/ns-included1/test3/inner/+%5C$*/', reverse('inc-ns1:test-ns3:urlobject-special-view'))

    def test_nested_namespace_pattern(self):
        "Namespaces can be nested"
        self.assertEqual(
            '/ns-included1/ns-included4/ns-included1/test3/inner/',
            reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view')
        )
        self.assertEqual(
            '/ns-included1/ns-included4/ns-included1/test3/inner/37/42/',
            reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', args=[37, 42])
        )
        self.assertEqual(
            '/ns-included1/ns-included4/ns-included1/test3/inner/42/37/',
            reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual(
            '/ns-included1/ns-included4/ns-included1/test3/inner/+%5C$*/',
            reverse('inc-ns1:inc-ns4:inc-ns1:test-ns3:urlobject-special-view')
        )

    def test_app_lookup_object(self):
        "A default application namespace can be used for lookup"
        self.assertEqual('/default/inner/', reverse('testapp:urlobject-view'))
        self.assertEqual('/default/inner/37/42/', reverse('testapp:urlobject-view', args=[37, 42]))
        self.assertEqual('/default/inner/42/37/', reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
        self.assertEqual('/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view'))

    def test_app_lookup_object_with_default(self):
        "A default application namespace is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/default/inner/', reverse('testapp:urlobject-view', current_app='test-ns3'))
        self.assertEqual(
            '/default/inner/37/42/',
            reverse('testapp:urlobject-view', args=[37, 42], current_app='test-ns3')
        )
        self.assertEqual(
            '/default/inner/42/37/',
            reverse('testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='test-ns3')
        )
        self.assertEqual(
            '/default/inner/+%5C$*/', reverse('testapp:urlobject-special-view', current_app='test-ns3')
        )

    def test_app_lookup_object_without_default(self):
        "An application namespace without a default is sensitive to the 'current' app can be used for lookup"
        self.assertEqual('/other2/inner/', reverse('nodefault:urlobject-view'))
        self.assertEqual('/other2/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42]))
        self.assertEqual('/other2/inner/42/37/', reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}))
        self.assertEqual('/other2/inner/+%5C$*/', reverse('nodefault:urlobject-special-view'))
        self.assertEqual('/other1/inner/', reverse('nodefault:urlobject-view', current_app='other-ns1'))
        self.assertEqual(
            '/other1/inner/37/42/', reverse('nodefault:urlobject-view', args=[37, 42], current_app='other-ns1')
        )
        self.assertEqual(
            '/other1/inner/42/37/',
            reverse('nodefault:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='other-ns1')
        )
        self.assertEqual('/other1/inner/+%5C$*/', reverse('nodefault:urlobject-special-view', current_app='other-ns1'))

    def test_special_chars_namespace(self):
        # A namespace prefix containing URL-special characters is escaped.
        self.assertEqual('/+%5C$*/included/normal/', reverse('special:included_namespace_urls:inc-normal-view'))
        self.assertEqual(
            '/+%5C$*/included/normal/37/42/',
            reverse('special:included_namespace_urls:inc-normal-view', args=[37, 42])
        )
        self.assertEqual(
            '/+%5C$*/included/normal/42/37/',
            reverse('special:included_namespace_urls:inc-normal-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/+%5C$*/included/+%5C$*/', reverse('special:included_namespace_urls:inc-special-view'))

    def test_namespaces_with_variables(self):
        "Namespace prefixes can capture variables: see #15900"
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', kwargs={'outer': '70'}))
        self.assertEqual(
            '/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', kwargs={'outer': '78', 'extra': 'foobar'})
        )
        self.assertEqual('/inc70/', reverse('inc-ns5:inner-nothing', args=['70']))
        self.assertEqual('/inc78/extra/foobar/', reverse('inc-ns5:inner-extra', args=['78', 'foobar']))

    def test_nested_app_lookup(self):
        "A nested current_app should be split in individual namespaces (#24904)"
        self.assertEqual('/ns-included1/test4/inner/', reverse('inc-ns1:testapp:urlobject-view'))
        self.assertEqual('/ns-included1/test4/inner/37/42/', reverse('inc-ns1:testapp:urlobject-view', args=[37, 42]))
        self.assertEqual(
            '/ns-included1/test4/inner/42/37/',
            reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37})
        )
        self.assertEqual('/ns-included1/test4/inner/+%5C$*/', reverse('inc-ns1:testapp:urlobject-special-view'))
        self.assertEqual(
            '/ns-included1/test3/inner/',
            reverse('inc-ns1:testapp:urlobject-view', current_app='inc-ns1:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test3/inner/37/42/',
            reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='inc-ns1:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test3/inner/42/37/',
            reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37}, current_app='inc-ns1:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test3/inner/+%5C$*/',
            reverse('inc-ns1:testapp:urlobject-special-view', current_app='inc-ns1:test-ns3')
        )

    def test_current_app_no_partial_match(self):
        "current_app should either match the whole path or shouldn't be used"
        self.assertEqual(
            '/ns-included1/test4/inner/',
            reverse('inc-ns1:testapp:urlobject-view', current_app='non-existent:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test4/inner/37/42/',
            reverse('inc-ns1:testapp:urlobject-view', args=[37, 42], current_app='non-existent:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test4/inner/42/37/',
            reverse('inc-ns1:testapp:urlobject-view', kwargs={'arg1': 42, 'arg2': 37},
                    current_app='non-existent:test-ns3')
        )
        self.assertEqual(
            '/ns-included1/test4/inner/+%5C$*/',
            reverse('inc-ns1:testapp:urlobject-special-view', current_app='non-existent:test-ns3')
        )
@override_settings(ROOT_URLCONF=urlconf_outer.__name__)
class RequestURLconfTests(SimpleTestCase):
    # Per-request URLconf overriding: middleware sets request.urlconf and the
    # resolver must honour it (or fall back to ROOT_URLCONF when it is None).

    def test_urlconf(self):
        # No middleware installed: the outer URLconf is active, so the inner
        # URLconf's bare path 404s.
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
        ]
    )
    def test_urlconf_overridden(self):
        # Middleware swaps in the inner URLconf, inverting which paths resolve.
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 404)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:,inner:/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.NullChangeURLconfMiddleware' % middleware.__name__,
        ]
    )
    def test_urlconf_overridden_with_null(self):
        """
        Overriding request.urlconf with None will fall back to the default
        URLconf.
        """
        response = self.client.get('/test/me/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'outer:/test/me/,inner:/inner_urlconf/second_test/')
        response = self.client.get('/inner_urlconf/second_test/')
        self.assertEqual(response.status_code, 200)
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 404)

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInResponseMiddleware' % middleware.__name__,
        ]
    )
    def test_reverse_inner_in_response_middleware(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a response middleware.
        """
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, b'/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInResponseMiddleware' % middleware.__name__,
        ]
    )
    def test_reverse_outer_in_response_middleware(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a response middleware.
        """
        # Once overridden, names from the default URLconf must NOT reverse.
        message = "Reverse for 'outer' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseInnerInStreaming' % middleware.__name__,
        ]
    )
    def test_reverse_inner_in_streaming(self):
        """
        Test reversing an URL from the *overridden* URLconf from inside
        a streaming response.
        """
        response = self.client.get('/second_test/')
        self.assertEqual(response.status_code, 200)
        # Consume the streaming body to trigger the in-stream reverse().
        self.assertEqual(b''.join(response), b'/second_test/')

    @override_settings(
        MIDDLEWARE=[
            '%s.ChangeURLconfMiddleware' % middleware.__name__,
            '%s.ReverseOuterInStreaming' % middleware.__name__,
        ]
    )
    def test_reverse_outer_in_streaming(self):
        """
        Test reversing an URL from the *default* URLconf from inside
        a streaming response.
        """
        message = "Reverse for 'outer' not found."
        with self.assertRaisesMessage(NoReverseMatch, message):
            self.client.get('/second_test/')
            # The reverse happens lazily while the stream is consumed.
            b''.join(self.client.get('/second_test/'))
class ErrorHandlerResolutionTests(SimpleTestCase):
    """Tests for handler400, handler404 and handler500"""

    def setUp(self):
        self.resolver = RegexURLResolver(r'^$', 'urlpatterns_reverse.urls_error_handlers')
        self.callable_resolver = RegexURLResolver(r'^$', 'urlpatterns_reverse.urls_error_handlers_callables')

    def test_named_handlers(self):
        # Dotted-path handlers resolve to the view plus empty kwargs.
        for status in (400, 404, 500):
            self.assertEqual(self.resolver.resolve_error_handler(status), (empty_view, {}))

    def test_callable_handlers(self):
        # Directly-callable handlers resolve identically.
        for status in (400, 404, 500):
            self.assertEqual(self.callable_resolver.resolve_error_handler(status), (empty_view, {}))
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls_without_full_import')
class DefaultErrorHandlerTests(SimpleTestCase):

    def test_default_handler(self):
        "If the urls.py doesn't specify handlers, the defaults are used"
        # Unknown path -> default 404 handler.
        response = self.client.get('/test/')
        self.assertEqual(response.status_code, 404)
        # A view that raises must propagate through the default 500 handling.
        with self.assertRaisesMessage(ValueError, "I don't think I'm getting good"):
            self.client.get('/bad_view/')
@override_settings(ROOT_URLCONF=None)
class NoRootUrlConfTests(SimpleTestCase):
    """Tests for handler404 and handler500 if ROOT_URLCONF is None"""

    def test_no_handler_exception(self):
        # With no URLconf configured at all, resolution must fail loudly
        # rather than fall back to any default handler.
        with self.assertRaises(ImproperlyConfigured):
            self.client.get('/test/me/')
@override_settings(ROOT_URLCONF='urlpatterns_reverse.namespace_urls')
class ResolverMatchTests(SimpleTestCase):
    # ResolverMatch must expose both its attribute API and the legacy
    # 3-tuple (func, args, kwargs) unpacking behaviour.

    def test_urlpattern_resolve(self):
        for path, url_name, app_name, namespace, view_name, func, args, kwargs in resolve_test_data:
            # Test legacy support for extracting "function, args, kwargs"
            match_func, match_args, match_kwargs = resolve(path)
            self.assertEqual(match_func, func)
            self.assertEqual(match_args, args)
            self.assertEqual(match_kwargs, kwargs)
            # Test ResolverMatch capabilities.
            match = resolve(path)
            self.assertEqual(match.__class__, ResolverMatch)
            self.assertEqual(match.url_name, url_name)
            self.assertEqual(match.app_name, app_name)
            self.assertEqual(match.namespace, namespace)
            self.assertEqual(match.view_name, view_name)
            self.assertEqual(match.func, func)
            self.assertEqual(match.args, args)
            self.assertEqual(match.kwargs, kwargs)
            # ... and for legacy purposes: index access mirrors the tuple.
            self.assertEqual(match[0], func)
            self.assertEqual(match[1], args)
            self.assertEqual(match[2], kwargs)

    def test_resolver_match_on_request(self):
        # The handler attaches the match to the response's request.
        response = self.client.get('/resolver_match/')
        resolver_match = response.resolver_match
        self.assertEqual(resolver_match.url_name, 'test-resolver-match')

    def test_resolver_match_on_request_before_resolution(self):
        # A bare HttpRequest has not been resolved yet.
        request = HttpRequest()
        self.assertIsNone(request.resolver_match)
@override_settings(ROOT_URLCONF='urlpatterns_reverse.erroneous_urls')
class ErroneousViewTests(SimpleTestCase):

    def test_noncallable_view(self):
        # View is not a callable (explicit import; arbitrary Python object)
        with self.assertRaisesMessage(TypeError, 'view must be a callable'):
            url(r'uncallable-object/$', views.uncallable)

    def test_invalid_regex(self):
        # Regex contains an error (refs #6170)
        # NOTE: the message is a fragment of the full error text, including
        # the unbalanced quote -- intentional, do not "fix" it.
        msg = '(regex_error/$" is not a valid regular expression'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            reverse(views.empty_view)
class ViewLoadingTests(SimpleTestCase):
    # get_callable() must accept both dotted paths and already-callable
    # objects, and distinguish "view missing" from other AttributeErrors.

    def test_view_loading(self):
        self.assertEqual(get_callable('urlpatterns_reverse.views.empty_view'), empty_view)
        # passing a callable should return the callable
        self.assertEqual(get_callable(empty_view), empty_view)

    def test_exceptions(self):
        # A missing view (identified by an AttributeError) should raise
        # ViewDoesNotExist, ...
        with self.assertRaisesMessage(ViewDoesNotExist, "View does not exist in"):
            get_callable('urlpatterns_reverse.views.i_should_not_exist')
        # ... but if the AttributeError is caused by something else don't
        # swallow it.
        with self.assertRaises(AttributeError):
            get_callable('urlpatterns_reverse.views_broken.i_am_broken')
class IncludeTests(SimpleTestCase):
    # Exercises every accepted (and rejected) argument shape of
    # django.conf.urls.include(): plain list, 2/3/4-tuples, and a module
    # object exposing app_name.
    url_patterns = [
        url(r'^inner/$', views.empty_view, name='urlobject-view'),
        url(r'^inner/(?P<arg1>[0-9]+)/(?P<arg2>[0-9]+)/$', views.empty_view, name='urlobject-view'),
        url(r'^inner/\+\\\$\*/$', views.empty_view, name='urlobject-special-view'),
    ]
    app_urls = URLObject('inc-app')

    def test_include_urls(self):
        # A bare list includes with no app_name and no namespace.
        self.assertEqual(include(self.url_patterns), (self.url_patterns, None, None))

    def test_include_namespace(self):
        msg = (
            "Specifying a namespace in django.conf.urls.include() without "
            "providing an app_name is not supported."
        )
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include(self.url_patterns, 'namespace')

    def test_include_4_tuple(self):
        msg = 'Passing a 4-tuple to django.conf.urls.include() is not supported.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace', 'blah'))

    def test_include_3_tuple(self):
        msg = 'Passing a 3-tuple to django.conf.urls.include() is not supported.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace'))

    def test_include_3_tuple_namespace(self):
        msg = 'Cannot override the namespace for a dynamic module that provides a namespace.'
        with self.assertRaisesMessage(ImproperlyConfigured, msg):
            include((self.url_patterns, 'app_name', 'namespace'), 'namespace')

    def test_include_2_tuple(self):
        # (patterns, app_name): the namespace defaults to the app_name.
        self.assertEqual(
            include((self.url_patterns, 'app_name')),
            (self.url_patterns, 'app_name', 'app_name')
        )

    def test_include_2_tuple_namespace(self):
        self.assertEqual(
            include((self.url_patterns, 'app_name'), namespace='namespace'),
            (self.url_patterns, 'app_name', 'namespace')
        )

    def test_include_app_name(self):
        # A module-like object with app_name: namespace defaults to it.
        self.assertEqual(
            include(self.app_urls),
            (self.app_urls, 'inc-app', 'inc-app')
        )

    def test_include_app_name_namespace(self):
        self.assertEqual(
            include(self.app_urls, 'namespace'),
            (self.app_urls, 'inc-app', 'namespace')
        )
@override_settings(ROOT_URLCONF='urlpatterns_reverse.urls')
class LookaheadTests(SimpleTestCase):
    """Resolution and reversal of URL patterns containing lookaround assertions."""

    def test_valid_resolve(self):
        for test_url in (
            '/lookahead-/a-city/',
            '/lookbehind-/a-city/',
            '/lookahead+/a-city/',
            '/lookbehind+/a-city/',
        ):
            self.assertEqual(resolve(test_url).kwargs, {'city': 'a-city'})

    def test_invalid_resolve(self):
        for test_url in (
            '/lookahead-/not-a-city/',
            '/lookbehind-/not-a-city/',
            '/lookahead+/other-city/',
            '/lookbehind+/other-city/',
        ):
            with self.assertRaises(Resolver404):
                resolve(test_url)

    def test_valid_reverse(self):
        expectations = [
            ('lookahead-positive', '/lookahead+/a-city/'),
            ('lookahead-negative', '/lookahead-/a-city/'),
            ('lookbehind-positive', '/lookbehind+/a-city/'),
            ('lookbehind-negative', '/lookbehind-/a-city/'),
        ]
        for name, expected in expectations:
            self.assertEqual(reverse(name, kwargs={'city': 'a-city'}), expected)

    def test_invalid_reverse(self):
        # Each lookaround rejects its own forbidden value.
        for name, city in [
            ('lookahead-positive', 'other-city'),
            ('lookahead-negative', 'not-a-city'),
            ('lookbehind-positive', 'other-city'),
            ('lookbehind-negative', 'not-a-city'),
        ]:
            with self.assertRaises(NoReverseMatch):
                reverse(name, kwargs={'city': city})
|
datapipe.py | '''
Copyright (c) <2018> <Pingcheng Zhang>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
A module to conduct IO missions in three different ways.
This module is used as a part of GSPNet project.
'''
import psycopg2
import glob
import pandas as pd
from threading import Thread
import time
import os
from multiprocessing import Queue, Process, cpu_count
class DirFileLoader:
    '''
    A util class to load csv files in a folder to a connected database.

    Paralleled IO can provide an average of 3X speed up.
    '''

    def __init__(self, pattern=None):
        '''
        Initialize DirFileLoader instance

        Params:
            pattern: path glob pattern. Defaults to the current working
                directory at call time (the previous `os.getcwd()` default
                was evaluated once at import time, freezing the cwd).

        Attributes:
            dirs: a list of csv file directory strings
            dir_pool: a Queue object holding paths not yet processed
            res_pool: a list used to collect results
        '''
        if pattern is None:
            pattern = os.getcwd()
        self.dirs = glob.glob(pattern)
        self.dir_pool = Queue()
        self.res_pool = []
        self._init_pool()

    def _init_pool(self):
        '''
        (Re)fill the work queue with every known csv path so a loader
        instance can be restarted.
        '''
        # Drain leftovers first so repeated runs start from a clean queue.
        while not self.dir_pool.empty():
            self.dir_pool.get()
        for d in self.dirs:
            self.dir_pool.put(d)

    def sqstart(self):
        '''
        Sequential loading of all csv files; serves as a base benchmark
        for the parallel starters.
        '''
        print('========== Sequential Loading Start ==========')
        self._init_pool()
        start = time.time()
        self.task_io(0)
        elapsed = time.time() - start
        print(f'Read time: {round(elapsed//60)}min {round(elapsed%60, 8)}sec.')

    def mpstart(self):
        '''
        Multi-processing IO task starter.

        Delegates self.task_io to one worker process per CPU.
        NOTE(review): res_pool is a plain list and is NOT shared across
        processes -- results appended by workers are lost here; only the
        database side effect survives. Confirm intended before relying on it.
        '''
        print('\n\n========== Parallel Loading Start ==========')
        self._init_pool()
        start = time.time()
        workers = [Process(target=self.task_io, args=(i,))
                   for i in range(cpu_count())]
        for p in workers:
            p.start()
        for p in workers:
            # join() on a finished process is a no-op; the previous
            # is_alive() guard could skip a process that finished between
            # the check and the join, leaving it unreaped.
            p.join()
        print(f'========== Task end in {round(time.time() - start, 4)} sec ==========\n')

    def mcstart(self):
        '''
        Multi-thread IO task starter.

        Delegates self.task_io to one thread per known csv file.
        '''
        print('\n\n========== Multi-thread Loading Start ==========')
        self._init_pool()
        start = time.time()
        workers = [Thread(target=self.task_io, args=(i,))
                   for i in range(len(self.dirs))]
        for t in workers:
            t.start()
        for t in workers:
            t.join()  # join() is safe on already-finished threads
        print(f'========== Task end in {round(time.time() - start, 4)} sec ==========\n')

    def task_io(self, id: int):
        '''
        IO task wrapper: drain the queue, copying each csv into the database.

        Params:
            id: task number (used only in log messages)
        '''
        print(f'IO task[{id}] start')
        # empty()/get() is racy between workers, but the timeout-based get
        # below turns a lost race into a caught exception rather than a hang.
        while not self.dir_pool.empty():
            try:
                csvfile = self.dir_pool.get(block=True, timeout=1)
                self.__csv2db(csvfile)
            except Exception as e:
                print(f'IO task[{id}] error: {e}')
        print(f'IO task[{id}] ended.')

    # Utility functions for one kind of IO operation
    def __readcsv(self, file: str) -> pd.DataFrame:
        '''
        IO task: read in csv.

        Params:
            file: file directory get from queue
        Return:
            tb: a pandas DataFrame
        '''
        print(f'Reading {file}...')
        tb = pd.read_csv(file)
        print(f'{file} loaded.')
        return tb

    def __csv2db(self, file: str):
        '''
        Copy csv file into the yellow_2018 table.

        Params:
            file: file directory get from queue
        '''
        conn = psycopg2.connect('host=localhost dbname=taxi user=postgres')
        try:
            # Cursor and file handle are closed by their context managers;
            # the original leaked one connection (and cursor) per file.
            with conn.cursor() as cur, open(file, 'r') as f:
                next(f)  # Skip the header row
                try:
                    cur.copy_from(f, 'yellow_2018', sep=',')
                except Exception as e:
                    print(f'{e}')
            conn.commit()
        finally:
            conn.close()
if __name__ == '__main__':
    # Manual smoke test: load every csv under the test folder with the
    # multi-threaded starter. Path is machine-specific -- adjust before running.
    print(os.getcwd())
    loader = DirFileLoader('F:\\NY_taxi\\test\\*.csv')
    loader.mcstart()
    # res = seq_load('F:\\NY_taxi\\test\\*.csv')
    # print(map(type, res))
|
main.py | import brain
import agent
from torch import multiprocessing as mp
from core import *
def run_agent(shared_brain, render=False, verbose=False):
    """
    Run the agent.

    Parameters
    ----------
    shared_brain : brain.Brain
        The shared brain the agents will use and update.
    render : boolean, optional
        Should the agent render its actions in the on-policy phase?
    verbose : boolean, optional
        Should the agent print progress to the console?
    """
    # Fix: the original used `CONTROL is 'discrete'`, which tests string
    # *identity*, not equality -- it only worked by CPython interning luck
    # (and emits a SyntaxWarning on modern Python).
    if CONTROL == 'discrete':
        local_agent = agent.DiscreteAgent(shared_brain, render, verbose)
    else:
        local_agent = agent.ContinuousAgent(shared_brain, render, verbose)
    local_agent.run()
if __name__ == "__main__":
    if NUMBER_OF_AGENTS == 1:
        # Don't bother with multiprocessing if only one agent
        run_agent(brain.brain, render=True)
    else:
        # Fan out one process per agent; all receive the same brain object
        # (render=False, verbose=True for each worker).
        processes = [mp.Process(target=run_agent, args=(brain.brain, False, True))
                     for _ in range(NUMBER_OF_AGENTS)]
        for process in processes:
            process.start()
        for process in processes:
            process.join()
def test():
    # Convenience entry point: run a single rendering agent in-process.
    run_agent(brain.brain, render=True)
|
test_106_shutdown.py | #
# mod-h2 test suite
# check HTTP/2 timeout behaviour
#
import time
from threading import Thread
import pytest
from .env import H2Conf
from pyhttpd.result import ExecResult
class TestShutdown:
    # Verify that a graceful apache reload does not kill an in-flight HTTP/2
    # request: start a long-running echo request, reload mid-transfer, and
    # require the response to complete intact.

    @pytest.fixture(autouse=True, scope='class')
    def _class_scope(self, env):
        # One cgi-enabled vhost shared by the whole class.
        conf = H2Conf(env)
        conf.add_vhost_cgi()
        conf.install()
        assert env.apache_restart() == 0

    def test_h2_106_01(self, env):
        url = env.mkurl("https", "cgi", "/necho.py")
        lines = 100000
        text = "123456789"
        wait2 = 1.0
        self.r = None

        def long_request():
            # Runs in a worker thread; result is handed back via self.r.
            args = ["-vvv",
                    "-F", f"count={lines}",
                    "-F", f"text={text}",
                    "-F", f"wait2={wait2}",
                    ]
            self.r = env.curl_get(url, 5, options=args)

        t = Thread(target=long_request)
        t.start()
        time.sleep(0.5)  # let the request get underway before reloading
        assert env.apache_reload() == 0
        t.join()
        # noinspection PyTypeChecker
        time.sleep(1)
        r: ExecResult = self.r
        assert r.exit_code == 0
        assert r.response, f"no response via {r.args} in {r.stderr}\nstdout: {len(r.stdout)} bytes"
        assert r.response["status"] == 200, f"{r}"
        # necho emits `lines` repetitions of `text` plus a newline each.
        assert len(r.response["body"]) == (lines * (len(text)+1)), f"{r}"
|
run.py | import subprocess
import asyncio
import threading
import time
import os
startscreen =f"\033[32;1m.----------------. .----------------. .----------------. .----------------.\n| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|\n| \033[34;1m|\033[30;1m ____ ____ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m ______ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m ____ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m _________ \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m |_ || _| \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_ _ \ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m .' `. \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | _ _ | \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m | |__| | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | |_) | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m / .--. \ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_/ | | \_| \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m | __ | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | __'. \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | | | | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | | \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m _| | | |_ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m _| |__) | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \ `--' / \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m _| |_ \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m |____||____| \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_______/ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m `.____.' 
\033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_____| \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|\n| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|\n '----------------' '----------------' '----------------' '----------------'\n .----------------. .----------------. .----------------. .----------------.\n| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|| \033[34;1m.--------------. \033[32;1m|\n| \033[34;1m|\033[30;1m ______ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m ____ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m ____ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m _________ \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m |_ _ \ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m .' `. \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m .' `. \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | _ _ | \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m | |_) | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m / .--. \ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m / .--. \ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_/ | | \_| \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m | __'. \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | | | | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | | | | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m | | \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m _| |__) | \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \ `--' / \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \ `--' / \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m _| |_ \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m |_______/ \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m `.____.' \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m `.____.' 
\033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m |_____| \033[34;1m| \033[32;1m|\n| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|| \033[34;1m|\033[30;1m \033[34;1m| \033[32;1m|\n| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|| \033[34;1m'--------------' \033[32;1m|\n '----------------' '----------------' '----------------' '----------------' \033[m"
print(startscreen)
print('\033[37;1mM\033[31;1ma\033[33;1md\033[32;1me \033[34;1mb\033[36;1my \033[35;1mH\033[m')
time.sleep(1)
print('\033[35;1m------------------------------------------'+'\033[32;1m'+'['+'\033[31;1m'+'START'+'\033[32;1m'+']'+ '\033[m'+'\033[35;1m------------------------------------------\033[m')
def py():
    """Run the Python bot as a subprocess, mirroring its stdout with a tag.

    Blocks until the child exits; drains any remaining output afterwards.
    """
    tag = '[\033[33;1mpyt\033[36;1mhon\033[m] '
    process = subprocess.Popen(['python3', '-u', 'python/bot.py'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        output = process.stdout.readline()
        if output.strip() != '':
            print(tag, output.strip())
        return_code = process.poll()
        if return_code is not None:
            print(tag, 'RETURN CODE', return_code)
            # Bug fix: the drain loop below previously printed the *ruby*
            # tag ('[ru by]') -- a copy-paste error from rb().
            for output in process.stdout.readlines():
                print(tag, output.strip())
            break
def rb():
    """Run the Ruby bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[31;1mru\033[35;1mby\033[m] '
    process = subprocess.Popen(['ruby', 'ruby/bot.rb'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def js():
    """Run the JavaScript bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[35;1mja\033[33;1mvascri\033[35;1mpt\033[m] '
    process = subprocess.Popen(['node', 'javascript/bot.js'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def ph():
    """Run the PHP bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[34;1mp\033[30;1mh\033[34;1mp\033[m] '
    process = subprocess.Popen(['php', 'php/bot.php'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def lu():
    """Run the Lua bot (via luvit) as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[31;1ml\033[32;1mu\033[34;1ma\033[m] '
    process = subprocess.Popen(['luvit', 'lua/bot.lua'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def go():
    """Run the Go bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[36;1mg\033[32;1mo\033[m] '
    process = subprocess.Popen(['go', 'run', 'golang/bot.go'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def cr():
    """Run the Crystal bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[31;1mcry\033[33;1ms\033[30;1mtal\033[m] '
    process = subprocess.Popen(['crystal', 'crystal/bot.cr'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def vb():
    """Run the VB.NET bot (via dotnet) as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[35;1mvisual\033[34;1mbasic\033[m] '
    # shell=True because the command changes directory before invoking dotnet.
    process = subprocess.Popen(['cd visualbasic/Example1&&dotnet run'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True,
                               shell=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
def da():
    """Run the Dart bot as a subprocess, mirroring its stdout with a tag."""
    tag = '[\033[34;1mda\033[36;1mrt\033[m] '
    process = subprocess.Popen(['dart', 'dart/bot.dart'],
                               stdout=subprocess.PIPE,
                               universal_newlines=True)
    while True:
        line = process.stdout.readline()
        if line.strip() != '':
            print(tag, line.strip())
        code = process.poll()
        if code is not None:
            print(tag, 'RETURN CODE', code)
            for line in process.stdout.readlines():
                print(tag, line.strip())
            break
async def main():
    """Start one thread per language bot and record them in `threads`.

    Async only so the caller can invoke it via asyncio.run(); the work
    itself is ordinary threading.
    """
    global threads
    threads = []
    # Same start order as before: python, ruby, js, php, lua, go, crystal,
    # visualbasic, dart.
    for target in (py, rb, js, ph, lu, go, cr, vb, da):
        worker = threading.Thread(target=target)
        worker.start()
        threads.append(worker)
# Launch every bot thread (main() only starts them), then block until each
# interpreter subprocess has finished before printing the footer.
asyncio.run(main())
for process in threads:
    process.join()
print('\033[35;1m------------------------------------------'+'\033[32;1m'+'['+'\033[31;1m'+'DONE'+'\033[32;1m'+']'+ '\033[m'+'\033[35;1m------------------------------------------\033[m')
print('\033[37;1mM\033[31;1ma\033[33;1md\033[32;1me \033[34;1mb\033[36;1my \033[35;1mH\033[m')
|
agent.py | import time
import math
from threading import Thread
import vehicle
import pygame
msgHeader = "[AGENT]: "
class Agent():
def __init__(self, ID, agentType="robot", vehicleType="car", strategyFile=None):
self.ID = str(ID)
if vehicleType.lower() == "car":
self.vehicle = vehicle.Car(self)
elif vehicleType.lower() == "truck":
self.vehicle = vehicle.Truck(self)
elif vehicleType.lower() == "motorcycle":
self.vehicle = vehicle.Motorcycle(self)
elif vehicleType.lower() == "bicycle":
self.vehicle = vehicle.Bicycle(self)
else:
print(msgHeader + "Could not initialise Agent " + self.ID + " with vehicle type '" + vehicleType + "'.")
self.vehicle = vehicle.Car(self)
self.worldKnowledge = {}
self.strategy = None
if strategyFile is not None and strategyFile != "Manual": # Do not look for 'manual' strategy file
try:
self.strategy = import_file("strategy", strategyFile)
print(msgHeader + "Successfully loaded the strategy file for Agent " + self.ID + ".")
except:
print(msgHeader + "Could not load the strategy file for Agent " + self.ID + ". (Fatal)")
exit()
elif strategyFile is not None and strategyFile == "Manual": # Mark agent as a manual car
self.strategy = "Manual"
print(msgHeader + "Manual agent successfully initialised.")
self.stopped = False
def start(self):
if self.strategy == "Manual":
t_process = Thread(target=self.manual_control)
t_process.daemon = True
t_process.start()
return self
else:
t_process = Thread(target=self.update)
t_process.daemon = True
t_process.start()
return self
def update(self):
while True:
if self.stopped or not self.strategy:
return
self.strategy.make_decision(self)
time.sleep(0.2)
    def stop(self):
        # Halt the vehicle first, then raise the flag that terminates the
        # update/manual_control thread on its next iteration.
        self.vehicle.stop()
        self.stopped = True
def update_world_knowledge(self, worldData):
for key in self.worldKnowledge:
if key in worldData:
self.worldKnowledge[key] = worldData[key]
def aim_speed(self, speed):
cspeed = self.vehicle.current_speed
if (cspeed is None):
cspeed = 0
if (speed > cspeed):
diff = speed - cspeed
if (diff > self.vehicle.max_acceleration):
diff = self.vehicle.max_acceleration
self.vehicle.set_speed(cspeed + diff)
else:
diff = cspeed - speed
if (diff > self.vehicle.max_deceleration):
diff = self.vehicle.max_deceleration
self.vehicle.set_speed(cspeed - diff)
def aim_angle(self, angle):
cangle = self.vehicle.orientation
if (cangle is None):
cangle = 0
diff = int(math.fabs(angle - cangle))
if (diff > 180):
diff = 360 - diff
if (cangle < angle):
da = -diff
else:
da = diff
else:
if (cangle < angle):
da = diff
else:
da = -diff
self.vehicle.set_angle(da // 3)
def get_vector_between_points(self, x1, y1, x2, y2):
if (x1 != None and y1 != None):
dx = x2 - x1
dy = y2 - y1
dist = int(math.sqrt(dx * dx + dy * dy))
theta = 0
if (dx != 0):
theta = math.atan(dy / dx) * (180 / math.pi)
if (dx == 0):
if (dy <= 0):
theta = 0
else:
theta = 180
elif (dy == 0):
if (dx < 0):
theta = 270
else:
theta = 90
elif (dx > 0 and dy > 0):
theta = theta + 90
elif (dx > 0 and dy < 0):
theta = theta + 90
elif (dx < 0 and dy > 0):
theta = theta + 270
elif (dx < 0 and dy < 0):
theta = theta + 270
return (dist, theta)
return (None, None)
# Return Distance and Angle to current waypoint. Angle must be degrees clockwise from north
def get_vector_to_waypoint(self):
if (self.vehicle.position[0] != None and self.vehicle.position[1] != None):
wpi = self.get_waypoint_index()
if (wpi != None):
if (self.worldKnowledge['waypoints'] != []):
x1 = self.vehicle.position[0]
y1 = self.vehicle.position[1]
x2 = self.worldKnowledge['waypoints'][wpi][0]
y2 = self.worldKnowledge['waypoints'][wpi][1]
return self.get_vector_between_points(x1, y1, x2, y2)
return (None, None)
    # Return current waypoint index
    def get_waypoint_index(self):
        # Raises KeyError if knowledge was never seeded with 'waypoint_index'.
        return self.worldKnowledge['waypoint_index']
# Set current waypoint index
def set_waypoint_index(self, wp):
    """Store ``wp`` as the current waypoint index, wrapping at both ends."""
    last = len(self.worldKnowledge['waypoints']) - 1
    if wp > last:
        wp = 0
    elif wp < 0:
        wp = last
    self.worldKnowledge['waypoint_index'] = wp
def manual_control(self):
    """Drive the vehicle from an Xbox-style gamepad (pygame joystick 0).

    Polls until ``self.stopped`` becomes true.  Left stick steers, the
    triggers set forward/backward speed, and buttons toggle horn,
    indicators, headlights, and siren.
    NOTE(review): assumes pygame and its event pump are initialised by
    the caller — confirm.
    """
    joystick = pygame.joystick.Joystick(0)
    prev_axes = (0, 0)  # last axis snapshot, for change detection
    prev_buttons = (0, 0, 0, 0, 0, 0, 0, 0)  # last button snapshot, for edge detection
    while True:
        if self.stopped:
            return
        else:
            # Get gamepad button and axis states
            left_stick = joystick.get_axis(0)
            left_trigger = joystick.get_axis(2)
            right_trigger = joystick.get_axis(5)
            left_bumper = joystick.get_button(4)
            right_bumper = joystick.get_button(5)
            A = joystick.get_button(0)
            B = joystick.get_button(1)
            X = joystick.get_button(2)
            Y = joystick.get_button(3)
            L3 = joystick.get_button(8)
            R3 = joystick.get_button(9)
            axes = (left_stick, left_trigger, right_trigger)
            buttons = (left_bumper, right_bumper, A, B, X, Y, L3, R3)
            if axes != prev_axes:
                self.vehicle.set_angle(int(64*left_stick)) # Left stick controls left/right steering
                # If both triggers are pressed simultaneously, the right trigger takes precedence
                if right_trigger >= -0.8:
                    self.vehicle.set_speed(int(63*((right_trigger+1)/2))) # Forward speed from right trigger
                elif left_trigger >= -0.8:
                    self.vehicle.set_speed(int(-64*((left_trigger+1)/2))) # Backward speed from left trigger
                elif right_trigger < -0.8 and left_trigger < -0.8: # Neither trigger pressed: stop the car
                    self.vehicle.stop()
                prev_axes = axes
            if buttons != prev_buttons:
                if X and not self.vehicle.horn_active: # X activates horn
                    self.vehicle.horn_on()
                if not X and self.vehicle.horn_active: # Deactivate horn when X is released
                    self.vehicle.horn_off()
                if left_bumper: # Left bumper toggles left indicator
                    if self.vehicle.left_signal_active:
                        self.vehicle.left_signal_off()
                    else:
                        self.vehicle.left_signal_on()
                if right_bumper: # Right bumper toggles right indicator
                    if self.vehicle.right_signal_active:
                        self.vehicle.right_signal_off()
                    else:
                        self.vehicle.right_signal_on()
                if Y: # Y toggles headlights
                    if self.vehicle.headlights_active:
                        self.vehicle.headlights_off()
                    else:
                        self.vehicle.headlights_on()
                if B: # B toggles police siren
                    if self.vehicle.police_siren_active:
                        self.vehicle.police_siren_off()
                    else:
                        self.vehicle.police_siren_on()
                # BUG FIX: remember the snapshot so that a held button does
                # not re-toggle its feature on every poll iteration.
                prev_buttons = buttons
def import_file(full_name, path):
    """Import and return the module at ``path``, registered as ``full_name``."""
    from importlib.util import module_from_spec, spec_from_file_location
    spec = spec_from_file_location(full_name, path)
    module = module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
|
main.py | import os
from queue import Queue
from threading import Thread
from urllib.parse import urlparse, parse_qs
from bottle import route, run, static_file, request
from youtube_dl import YoutubeDL
# youtube-dl options for the background downloader: best audio stream,
# transcoded to 256 kbps MP3 by the ffmpeg postprocessor.
app_defaults = {
    'format': 'bestaudio/best',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '256',
    }],
}
# Base options for metadata lookups / webm downloads; the output template
# is rewritten per requested format by adjust_conf().
info_conf = {
    'format': 'bestaudio/best',
    'outtmpl': './downloaded/%(id)s.%(ext)s',
}
@route('/', method='GET')
def index():
    # Serve the single-page front end.
    return static_file('index.html', root='public', mimetype='text/html')
# Raw string avoids the invalid '\.' escape-sequence warning (W605);
# the route pattern itself is unchanged.
@route(r'/css/<filename:re:.*\.css>', method='GET')
def stylesheets(filename):
    # Serve compiled CSS assets.
    return static_file(filename, root='public/css', mimetype='text/css')
# Raw string avoids the invalid '\.' escape-sequence warning (W605).
@route(r'/css/<filename:re:.*\.map>', method='GET')
def stylesheets_map(filename):
    # Serve CSS source maps.
    return static_file(filename, root='public/css', mimetype='application/octet-stream')
# Raw string avoids the invalid '\.' escape-sequence warning (W605).
@route(r'/js/<filename:re:.*\.js>', method='GET')
def javascripts(filename):
    # Serve front-end JavaScript assets.
    return static_file(filename, root='public/js', mimetype='application/javascript')
@route('/<language>', method='GET')
def frontend_download(language):
    # Serve a localized copy of the front end from public/lang/<language>/.
    return static_file('index.html', root='public/lang/' + language, mimetype='text/html')
@route('/play/<video_id>', method='GET')
def serve_webm(video_id):
    # Default playback route: original-container webm audio.
    return static_file(video_id + '.webm', root='downloaded/webm', mimetype='audio/webm')
@route('/play/<file_format>/<video_id>', method='GET')
def serve_audio(file_format, video_id):
    # Transcoded playback route, e.g. /play/mp3/<id> -> downloaded/mp3/<id>.mp3.
    return static_file(video_id + '.' + file_format, root='downloaded/' + file_format, mimetype='audio/' + file_format)
@route('/api/<apiname>')
def api(apiname):
    """JSON API dispatcher.

    ``/api/info``     -> metadata (and optionally a playable URL) for ``video``
    ``/api/download`` -> queue ``video`` for background download
    """
    try:
        query = request.query
        if apiname == 'info':
            forced = False
            file_format = 'webm'
            if query.get('forced'):
                forced = str2bool(query.get('forced'))
            if query.get('format'):
                file_format = query.get('format')
            return info(query['video'], forced, file_format)
        elif apiname == 'download':
            # Indexing (rather than bottle's attribute access, which yields
            # '') raises KeyError for a missing parameter so the handler
            # below reports it instead of queueing an empty URL.
            url = query['video']
            yt_queue.put(url)
            return {"success": True, "message": "Video successfully added to download queue."}
        # Previously an unknown apiname fell through and returned None.
        return {'status': False, 'message': 'Unknown API name: ' + apiname}
    except KeyError:
        return {'status': False, 'message': 'Some parameters are missing!'}
def info(video, forced=False, file_format='webm'):
    """Fetch metadata for *video* and build a playable URL.

    ``forced=True`` downloads the file and serves it from this host.
    ``can_internal_uri`` remembers the caller's intent, because ``forced``
    is cleared below when the file is already on disk (no re-download
    needed, but the internal URL should still be returned).
    """
    can_internal_uri = forced
    allowed_formats = get_allowed_formats()
    conf = adjust_conf(info_conf, file_format)
    if not file_format in allowed_formats:
        return {
            'status': False,
            'message': 'Wrong file format. Supported file formats: ' + ', '.join(allowed_formats)
        }
    # Already downloaded: skip the download step, keep serving locally.
    if os.path.isfile('./downloaded/' + file_format + '/' + extract_video_id(video) + '.' + file_format):
        forced = False
    with YoutubeDL(conf) as ydl:
        # Second argument of extract_info is its `download` flag.
        info_dict = ydl.extract_info(video, forced)
    if can_internal_uri:
        # Build /play/<format>/<id>; plain webm omits the format segment.
        url = get_host() + '/play/'
        if file_format in allowed_formats and file_format != 'webm':
            url += file_format + '/'
        url += info_dict.get("id", None)
    else:
        # Upstream (googlevideo) URL straight from youtube-dl.
        url = info_dict.get("url", None)
    return {
        'status': True,
        'id': info_dict.get("id", None),
        'title': info_dict.get('title', None),
        'description': info_dict.get('description', None),
        'url': url
    }
def adjust_conf(conf=None, file_format='webm'):
    """Return a youtube-dl configuration tailored to *file_format*.

    Works on a shallow copy of *conf*: the previous version mutated the
    caller's dict, so the module-level ``info_conf`` accumulated an
    ``FFmpegExtractAudio`` postprocessor after one mp3/m4a request and
    kept applying it to later webm requests.
    """
    conf = dict(conf) if conf else {}
    if file_format in ('mp3', 'm4a'):
        # Both formats share the same extract-audio postprocessor, varying
        # only in the preferred codec.
        conf['postprocessors'] = [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': file_format,
            'preferredquality': '256',
        }]
    # Files land in a per-format directory, named by video id.
    conf['outtmpl'] = './downloaded/' + file_format + '/%(id)s.%(ext)s'
    return conf
def download(url):
    # Blocking download with the module-level MP3 configuration; runs on
    # the background worker thread (see work_to_me).
    with YoutubeDL(app_defaults) as ydl:
        ydl.download([url])
@route('/api/queue', method='GET')
def q_size():
    # NOTE(review): despite the key name, "size" holds a snapshot of the
    # queued URLs themselves, not a count — kept for API compatibility.
    return {"status": True, "size": list(yt_queue.queue)}
def work_to_me():
    # Worker loop: drain the download queue forever.  `done` is only
    # re-checked between jobs and get() blocks on an empty queue, so the
    # thread may not exit promptly at shutdown (see bottom of file).
    while not done:
        url = yt_queue.get()
        download(url)
        yt_queue.task_done()
def extract_video_id(url):
    """Return the YouTube video id embedded in *url*.

    Handles long-form ``...watch?v=ID`` URLs as before, and additionally
    short ``https://youtu.be/ID`` links.  For any other URL the original
    behavior is preserved: a KeyError is raised when ``v`` is absent.
    """
    url_data = urlparse(url)
    query = parse_qs(url_data.query)
    if "v" in query:
        return query["v"][0]
    # youtu.be short links carry the id as the first path component.
    if url_data.hostname and url_data.hostname.endswith("youtu.be"):
        return url_data.path.lstrip("/").split("/")[0]
    return query["v"][0]  # preserves the original KeyError contract
def str2bool(v):
    """Loosely parse a truthy query-string value ("yes"/"true"/"t"/"1")."""
    truthy = ("yes", "true", "t", "1")
    return v.lower() in truthy
def get_host():
    # Public base URL for building /play links: YOUTUBEDL_API_HOST env
    # override if set, otherwise derived from the current bottle request
    # as scheme://host.
    env = os.environ.get('YOUTUBEDL_API_HOST')
    if not env:
        parts = request.urlparts
        env = parts.scheme + '://' + parts.netloc
    return env
def get_allowed_formats():
    """Formats the API may serve; overridable via YOUTUBEDL_API_FORMATS (comma-separated)."""
    env = os.environ.get('YOUTUBEDL_API_FORMATS')
    if env:
        return env.split(',')
    return ['webm', 'm4a', 'mp3']
yt_queue = Queue()  # URLs awaiting background download
done = False        # shutdown flag observed by the worker loop
thread = Thread(target=work_to_me)
thread.start()
print("Started download thread")
run(host='0.0.0.0', port=1998)  # blocks until the HTTP server stops
done = True
# NOTE(review): join() can hang here — the worker blocks in yt_queue.get()
# and only observes `done` after finishing another job.
thread.join()
|
Hiwin_RT605_ArmCommand_Socket_20190627204140.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0'  # initial value of the transmit buffer
Arm_feedback = 1  # assume the arm is busy at start-up
NAME = 'socket_server'  # ROS node name
arm_mode_flag = False  # set once the strategy side has sent an arm mode
##------------class pos-------
class point():
    """Mutable 6-DOF pose record: position plus pitch/roll/yaw Euler angles."""
    _FIELDS = ("x", "y", "z", "pitch", "roll", "yaw")
    def __init__(self, x, y, z, pitch, roll, yaw):
        for name, value in zip(self._FIELDS, (x, y, z, pitch, roll, yaw)):
            setattr(self, name, value)
# Shared pose written by the strategy side before a command is transmitted.
pos = point(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)
##------------class socket_cmd---------
class socket_data():
    """Bundle of arm-command fields filled in by the strategy side."""
    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        vars(self).update(grip=grip, setvel=setvel, ra=ra, delay=delay,
                          setboth=setboth, action=action, Speedmode=Speedmode)
# Shared command state consumed by Socket_command().
socket_cmd = socket_data(0, 0.0, 0, 0, 0, 0, 0)
##-----------switch define------------##
class switch(object):
    """for-based switch/case helper (the classic ActiveState recipe).

    Usage::

        for case in switch(value):
            if case(A): ...; break
            if case():  ...  # default
    """
    def __init__(self, value):
        self.value = value
        self.fall = False  # True once a case matched; enables fall-through
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # BUG FIX (PEP 479): `raise StopIteration` inside a generator is a
        # RuntimeError on Python 3.7+; a bare return ends iteration cleanly.
        return
    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args: # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest (arm state, sent flag) pair reported by the controller."""
    def __init__(self, ArmState, SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag
# Shared feedback record updated by Socket_feedback().
state_feedback = StateFeedback(0, 0)
class client():
    """Thin TCP client wrapper around the arm-controller socket."""
    def __init__(self):
        #self.get_connect()
        pass
    def get_connect(self):
        # Connect to the arm controller at its fixed LAN address.
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))
    def send(self, msg):
        # Encode with utf-8 before transmitting (msg is a str command).
        self.s.send(msg.encode('utf-8'))
    def get_recieve(self):
        # 1024 caps how many bytes a single recv may return.
        data = self.s.recv(1024)
        # NOTE(review): the decoded value is discarded — callers index the
        # raw bytes (see Socket_feedback), so this line is effectively a
        # no-op and is deliberately left untouched.
        data.decode('utf-8')
        return data
    def close(self):
        self.s.close()
# Module-wide client instance shared by the command and feedback paths.
Socket = client()
def point_data(x, y, z, pitch, roll, yaw):
    """Copy a target pose from the strategy side into the shared ``pos``."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth):
    """Receive an arm-mode command from the strategy side and transmit it."""
    global arm_mode_flag
    socket_cmd.action = action
    socket_cmd.grip = grip
    socket_cmd.ra = ra
    socket_cmd.setvel = setvel
    socket_cmd.setboth = setboth
    arm_mode_flag = True
    # Immediately encode and send the updated command state.
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):
    # Store the requested speed mode; it is applied by the next Mode command.
    socket_cmd.Speedmode = speedmode
def socket_talker():
    """ROS server node: publish [ArmState, SentFlag] on 'chatter' at 10 Hz."""
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    rate = rospy.Rate(10) # 10hz
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        # Mirror the latest controller feedback into the ROS message.
        state.data = [state_feedback.ArmState, state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send the arm command over the socket -----------------
def Socket_command():
    """Encode the pending socket_cmd/pos state into a TCP command string and send it.

    Dispatches on socket_cmd.action via the switch helper; nested switch
    selects the control mode (position / Euler / both) for motion commands.
    """
    global data
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        #------- set arm velocity --------
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        #------- set arm delay time --------
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        #------- set arm fast/safe speed mode --------
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6 ## reset back to the idle/default mode state
    print(data)
    print("Socket:", Socket)
    #Socket.send(data.encode('utf-8')) # socket send, for python to translate str
    # client.send() performs the utf-8 encoding itself.
    Socket.send(data)
##-----------socket client--------
def socket_client():
    """Connect the shared Socket to the controller, then pump feedback until shutdown."""
    #global Socket
    try:
        #Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        # Connection failure is fatal for this worker thread.
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    # Blocks until the controller reports the shutdown state.
    Socket_feedback(Socket)
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Poll controller replies and mirror them into state_feedback.

    Replies are raw bytes; indexing yields integer byte values, so byte 2
    compared against '48'/'49'/'54' means ASCII '0'/'1'/'6'.
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # Arm controller reports its state.
        if str(feedback_str[2]) == '48':  # '0': arm ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # '1': arm busy, cannot take a new command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # '6': strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # Sent-flag acknowledgement.
        if str(feedback_str[4]) == '48':  # returned 0 (false)
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1 (true)
            state_feedback.SentFlag = 1
        ##--------------- end of arm-command socket transfer -----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
    """rospy shutdown hook: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  ## reset to the idle/default mode state
    ## threading: socket client runs beside the ROS publisher
    t = threading.Thread(target=socket_client)
    t.start()  # start the socket-client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## end threading
|
command.py | import base64
import json
import time
import threading
import tornado.httpserver
import tornado.ioloop
import tornado.web
from basescript import BaseScript
import util
from messagestore import *
from gmailhistory import GmailHistory
class RequestHandler(tornado.web.RequestHandler):
    """Receives Gmail pub/sub push notifications and triggers a mailbox sync."""
    DESC = "Gets realtime messages through gmail pub/sub webhooks"
    def post(self):
        """
        ref: https://developers.google.com/gmail/api/guides/push#receiving_notifications
        """
        data = json.loads(self.request.body)
        # BUG FIX: urlsafe_b64decode returns bytes on Python 3, and
        # `"historyId" not in <bytes>` raises TypeError — decode first.
        msg_data = base64.urlsafe_b64decode(str(data["message"]["data"])).decode("utf-8")
        if "historyId" not in msg_data:
            return
        try:
            gmail = GmailCommand().get_gmail_obj()
            gmail.get_new_msg()
        except IOError as err:  # TODO: Diskdict error
            # NOTE(review): `log` is not defined in this module unless
            # `from messagestore import *` provides it — verify.
            log.exception(err)
class GmailCommand(BaseScript):
    """Fetches Gmail data and stores it in the configured message stores."""
    DESC = "A tool to get the data from gmail and store it in database"

    def _parse_msg_target_arg(self, t):
        """Split a target spec into (import path, kwargs dict).

        >>> from command import GmailCommand
        >>> obj = GmailCommand()
        >>> obj._parse_msg_target_arg('forwarder=gmaildump.messagestore.SQLiteStore:db_name=gmail_sqlite:table_name=gmail_dump_sqlit')
        ('gmaildump.messagestore.SQLiteStore', {'db_name': 'gmail_sqlite', 'table_name': 'gmail_dump_sqlit'})
        """
        path, args = t.split(":", 1)
        path = path.split("=")[1]
        args = dict(a.split("=", 1) for a in args.split(":"))
        return path, args

    def msg_store(self):
        """Instantiate every message-store target named on the command line."""
        targets = []
        for t in self.args.target:
            imp_path, args = self._parse_msg_target_arg(t)
            target_class = util.load_object(imp_path)
            target_obj = target_class(**args)
            targets.append(target_obj)
        return targets

    def listen_realtime(self):
        """Run the tornado server that receives pub/sub push notifications."""
        self.log.info("Running tornodo on the machine")
        app = tornado.web.Application(handlers=[(r"/", RequestHandler)])
        http_server = tornado.httpserver.HTTPServer(app)
        # BUG FIX: argparse stores --tornodo-port as args.tornodo_port; the
        # previous `self.args.tornodo - port` raised NameError at runtime.
        http_server.listen(self.args.tornodo_port)
        tornado.ioloop.IOLoop.instance().start()

    def get_gmail_obj(self):
        """Build an authorized GmailHistory wired to the configured targets."""
        targets = self.msg_store()
        # BUG FIX: dashed option names become underscore attributes on the
        # argparse namespace; the previous attribute-minus-name expressions
        # (e.g. `self.args.credentials - path`) raised NameError.
        gmail = GmailHistory(
            cred_path=self.args.credentials_path,
            query=self.args.api_query,
            topic_name=self.args.sub_topic,
            file_path=self.args.file_path,
            status_path=self.args.status_path,
            targets=targets,
            log=self.log,
        )
        gmail.authorize()  # authorizing gmail service in order to make gmail api calls
        return gmail

    def run(self):
        gmail = self.get_gmail_obj()
        # start getting the gmail msgs from users mailbox
        gmail.start()
        # call gmail api watch request every day
        th = threading.Thread(target=gmail.renew_mailbox_watch)
        th.daemon = True
        th.start()
        self.thread_watch_gmail = th
        # listen for real time msgs on tornodo specified port
        self.listen_realtime()

    def define_args(self, parser):
        """Register all command-line options on *parser*."""
        # gmail api arguments
        parser.add_argument(
            "-cred",
            "--credentials-path",
            required=True,
            help="directory path to get the client \
                secret and credential files for gmail \
                api authentication",
        )
        parser.add_argument(
            "-gmail_topic",
            "--sub-topic",
            required=True,
            help="The topic to which \
                webhooks or push notifications has subscibed from pub/sub",
        )
        parser.add_argument(
            "-query",
            "--api-query",
            nargs="?",
            help="query to get required msgs,\
                eg: from: support@deepcompute.com\
                ref:https://support.google.com/mail/answer/7190?hl=en",
        )
        # attachments arguments
        parser.add_argument(
            "-f",
            "--file-path",
            nargs="?",
            help="The path of the directory where user\
                want to save gmail inbox attachments. By default attachements \
                will not been stored",
        )
        # diskdict arguments
        parser.add_argument(
            "-status_path",
            "--status-path",
            default="/tmp",
            help="File path where the status of gmail \
                messages needs to be stored. Default path: /tmp/",
        )
        # database arguments
        parser.add_argument(
            "-target",
            "--target",
            nargs="+",
            help='format for Mongo: store=<MongoStore-classpath>:db_name=<database-name>:collection_name=<collection-name> \
                format for SQLite: store=<SQLiteStore-classpath>:host=<hostname>:port=<port-number>:db_name=<db-name>:table_name=<table-name>" \
                format for NSQ: store=<NsqStore-classpath>:host=<hostname>:port=<port-number>:topic=<topic-name> \
                format for file: store=<FileStore-classpath>:file_path=<file-path>',
        )
        # tornodo arguments
        parser.add_argument(
            "-tp",
            "--tornodo-port",
            nargs="?",
            default=8788,
            help="port in which tornodo needs to run to get realtime msgs\
                default port: 8788",
        )
def main():
    # CLI entry point: BaseScript.start() parses args and invokes run().
    GmailCommand().start()
|
trex_subscriber.py | #!/router/bin/python
import json
import threading
import time
import datetime
import zmq
import re
import random
import os
import signal
import traceback
import sys
from .trex_types import RC_OK, RC_ERR
#from .trex_stats import *
from ..utils.text_opts import format_num
from ..utils.zipmsg import ZippedMsg
# basic async stats class
class CTRexAsyncStats(object):
    """Holds the latest stats snapshot plus a reference point for relative queries."""
    def __init__(self):
        self.ref_point = None  # snapshot used as the zero for get_rel()
        self.current = {}      # most recent snapshot
        self.last_update_ts = datetime.datetime.now()

    def update(self, snapshot):
        """Store a new snapshot; the first one also becomes the reference point."""
        self.last_update_ts = datetime.datetime.now()
        self.current = snapshot
        # Idiom fix: identity test instead of the original `== None`.
        if self.ref_point is None:
            self.ref_point = self.current

    def clear(self):
        """Make the current snapshot the new zero for relative queries."""
        self.ref_point = self.current

    def get(self, field, format=False, suffix=""):
        """Absolute value of *field*; optionally human-formatted with *suffix*."""
        if field not in self.current:
            return "N/A"
        if not format:
            return self.current[field]
        else:
            return format_num(self.current[field], suffix)

    def get_rel(self, field, format=False, suffix=""):
        """Value of *field* relative to the reference point."""
        if field not in self.current:
            return "N/A"
        if not format:
            return (self.current[field] - self.ref_point[field])
        else:
            return format_num(self.current[field] - self.ref_point[field], suffix)

    # return true if new data has arrived in the past 2 seconds
    def is_online(self):
        delta_ms = (datetime.datetime.now() - self.last_update_ts).total_seconds() * 1000
        return (delta_ms < 2000)
# describes the general stats provided by TRex
class CTRexAsyncStatsGeneral(CTRexAsyncStats):
    """General (non-port) TRex statistics; all behavior inherited unchanged."""
    def __init__ (self):
        super(CTRexAsyncStatsGeneral, self).__init__()
# per port stats
class CTRexAsyncStatsPort(CTRexAsyncStats):
    """Per-port statistics snapshot."""
    def __init__ (self):
        super(CTRexAsyncStatsPort, self).__init__()
    def get_stream_stats (self, stream_id):
        # Stream-level granularity is not tracked by this client.
        return None
# stats manager
class CTRexAsyncStatsManager():
    """Routes raw snapshot dicts into general and per-port stat objects."""

    def __init__(self):
        self.general_stats = CTRexAsyncStatsGeneral()
        self.port_stats = {}

    def get_general_stats(self):
        return self.general_stats

    def get_port_stats(self, port_id):
        # None when the port has never appeared in a snapshot.
        return self.port_stats.get(str(port_id))

    def update(self, data):
        self.__handle_snapshot(data)

    def __handle_snapshot(self, snapshot):
        """Split snapshot keys of the form '<field>-<port>' from general keys."""
        general = {}
        per_port = {}
        matcher = re.compile(r'(.*)\-([0-8])')
        for key, value in snapshot.items():
            hit = matcher.search(key)
            if hit:
                field_name, port_id = hit.group(1), hit.group(2)
                per_port.setdefault(port_id, {})[field_name] = value
            else:
                # no port match - general stats
                general[key] = value
        # update the general object with the snapshot
        self.general_stats.update(general)
        # update all ports, creating holders on first sight
        for port_id, fields in per_port.items():
            if port_id not in self.port_stats:
                self.port_stats[port_id] = CTRexAsyncStatsPort()
            self.port_stats[port_id].update(fields)
class ServerEventsIDs(object):
    """
    server event IDs
    (in sync with the server IDs)
    """
    # Port lifecycle events (see TRexSubscriber.handle_event for decoding).
    EVENT_PORT_STARTED = 0
    EVENT_PORT_STOPPED = 1
    EVENT_PORT_PAUSED = 2
    EVENT_PORT_RESUMED = 3
    EVENT_PORT_JOB_DONE = 4
    EVENT_PORT_ACQUIRED = 5
    EVENT_PORT_RELEASED = 6
    EVENT_PORT_ERROR = 7
    EVENT_PORT_ATTR_CHG = 8
    # ASTF mode state change.
    EVENT_ASTF_STATE_CHG = 50
    # Server-side shutdown notification.
    EVENT_SERVER_STOPPED = 100
class TRexSubscriber():
    """ZMQ SUB-side client: receives TRex async stats/events on a background
    thread and dispatches them to the context's event handler."""

    # Subscriber-thread lifecycle states.
    THREAD_STATE_ACTIVE = 1
    THREAD_STATE_ZOMBIE = 2  # keeps draining messages but ignores them
    THREAD_STATE_DEAD = 3

    def __init__(self, ctx, rpc):
        self.ctx = ctx
        self.port = ctx.async_port
        self.server = ctx.server
        self.rpc = rpc
        self.event_handler = ctx.event_handler
        self.raw_snapshot = {}
        self.stats = CTRexAsyncStatsManager()
        self.last_data_recv_ts = 0
        self.async_barrier = None
        self.monitor = AsyncUtil()
        self.connected = False
        self.zipped = ZippedMsg()
        self.t_state = self.THREAD_STATE_DEAD

    # connects the async channel
    def connect(self):
        if self.connected:
            self.disconnect()
        self.tr = "tcp://{0}:{1}".format(self.server, self.port)
        # Socket to talk to server
        self.context = zmq.Context()
        self.socket = self.context.socket(zmq.SUB)
        # before running the thread - mark as active
        self.t_state = self.THREAD_STATE_ACTIVE
        self.t = threading.Thread(target=self._run_safe)
        # kill this thread on exit and don't add it to the join list
        # (daemon attribute replaces the deprecated setDaemon()).
        self.t.daemon = True
        self.t.start()
        self.connected = True
        # first barrier - make sure async thread is up
        rc = self.barrier()
        if not rc:
            self.disconnect()
            return rc
        return RC_OK()

    # disconnect
    def disconnect(self):
        if not self.connected:
            return
        # mark for join; terminating the context unblocks the recv loop
        self.t_state = self.THREAD_STATE_DEAD
        self.context.term()
        self.t.join()
        # done
        self.connected = False

    # set the thread as a zombie (in case of server death)
    def set_as_zombie(self):
        self.last_data_recv_ts = None
        self.t_state = self.THREAD_STATE_ZOMBIE

    # return the timeout in seconds for the ZMQ subscriber thread
    def get_timeout_sec(self):
        return 3

    def _run_safe(self):
        # socket options must be set on the same thread that uses the socket
        self.socket.setsockopt(zmq.SUBSCRIBE, b'')
        self.socket.setsockopt(zmq.RCVTIMEO, self.get_timeout_sec() * 1000)
        self.socket.connect(self.tr)
        try:
            self._run()
        except Exception as e:
            self.ctx.event_handler.on_event("subscriber crashed", e)
        finally:
            # closing of socket must be from the same thread
            self.socket.close(linger=0)

    # thread function
    def _run(self):
        got_data = False
        self.monitor.reset()
        while self.t_state != self.THREAD_STATE_DEAD:
            try:
                with self.monitor:
                    line = self.socket.recv()
                # last data recv.
                self.last_data_recv_ts = time.time()
                # if thread was marked as zombie - it does nothing besides fetching messages
                if self.t_state == self.THREAD_STATE_ZOMBIE:
                    continue
                self.monitor.on_recv_msg(line)
                # try to decompress
                unzipped = self.zipped.decompress(line)
                if unzipped:
                    line = unzipped
                line = line.decode()
                # signal once
                if not got_data:
                    self.ctx.event_handler.on_event("subscriber resumed")
                    got_data = True
            # got a timeout - mark as not alive and retry
            except zmq.Again:
                # signal once
                if got_data:
                    self.ctx.event_handler.on_event("subscriber timeout", self.get_timeout_sec())
                    got_data = False
                continue
            except zmq.ContextTerminated:
                # outside thread signaled us to exit
                assert(self.t_state != self.THREAD_STATE_ACTIVE)
                break
            msg = json.loads(line)
            name = msg['name']
            data = msg['data']
            msg_type = msg['type']
            baseline = msg.get('baseline', False)
            self.raw_snapshot[name] = data
            self.__dispatch(name, msg_type, data, baseline)

    def get_stats(self):
        return self.stats

    def get_raw_snapshot(self):
        return self.raw_snapshot

    # dispatch the message to the right place
    def __dispatch(self, name, type, data, baseline):
        # stats
        if name == "trex-global":
            self.handle_global_stats_update(data, baseline)
        elif name == "flow_stats":
            self.handle_flow_stats_update(data, baseline)
        elif name == "latency_stats":
            self.handle_latency_stats_update(data, baseline)
        # events
        elif name == "trex-event":
            self.handle_event(type, data)
        # barriers
        elif name == "trex-barrier":
            self.handle_async_barrier(type, data)
        else:
            pass

    def handle_global_stats_update(self, data, baseline):
        self.ctx.event_handler.on_event("global stats update", data, baseline)

    def handle_flow_stats_update(self, data, baseline):
        self.ctx.event_handler.on_event("flow stats update", data, baseline)

    # BUG FIX: this was defined as a second handle_flow_stats_update, which
    # shadowed the method above and left __dispatch's call to
    # handle_latency_stats_update raising AttributeError.
    def handle_latency_stats_update(self, data, baseline):
        self.ctx.event_handler.on_event("latency stats update", data, baseline)

    def handle_event(self, event_id, data):
        """Translate a numeric server event into a named client event."""
        if event_id == ServerEventsIDs.EVENT_PORT_STARTED:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port started", port_id)
        # port stopped
        elif event_id == ServerEventsIDs.EVENT_PORT_STOPPED:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port stopped", port_id)
        # port paused
        elif event_id == ServerEventsIDs.EVENT_PORT_PAUSED:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port paused", port_id)
        # port resumed
        elif event_id == ServerEventsIDs.EVENT_PORT_RESUMED:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port resumed", port_id)
        # port finished traffic
        elif event_id == ServerEventsIDs.EVENT_PORT_JOB_DONE:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port job done", port_id)
        # port was acquired - maybe stolen...
        elif event_id == ServerEventsIDs.EVENT_PORT_ACQUIRED:
            session_id = data['session_id']
            port_id = int(data['port_id'])
            who = data['who']
            force = data['force']
            self.ctx.event_handler.on_event("port acquired", port_id, who, session_id, force)
        # port was released
        elif event_id == ServerEventsIDs.EVENT_PORT_RELEASED:
            port_id = int(data['port_id'])
            who = data['who']
            session_id = data['session_id']
            self.ctx.event_handler.on_event("port released", port_id, who, session_id)
        # port error
        elif event_id == ServerEventsIDs.EVENT_PORT_ERROR:
            port_id = int(data['port_id'])
            self.ctx.event_handler.on_event("port error", port_id)
        # port attr changed
        elif event_id == ServerEventsIDs.EVENT_PORT_ATTR_CHG:
            port_id = int(data['port_id'])
            attr = data['attr']
            self.ctx.event_handler.on_event("port attr chg", port_id, attr)
        # ASTF state changed
        elif event_id == ServerEventsIDs.EVENT_ASTF_STATE_CHG:
            state = data['state']
            error = data.get('error', '')
            self.ctx.event_handler.on_event('astf state changed', state, error)
        # server stopped
        elif event_id == ServerEventsIDs.EVENT_SERVER_STOPPED:
            cause = data['cause']
            self.ctx.event_handler.on_event("server stopped", cause)
        # unhandled
        else:
            print('Unhandled event %d' % event_id)

    # async barrier handling routine
    def handle_async_barrier(self, type, data):
        if self.async_barrier['key'] == type:
            self.async_barrier['ack'] = True

    # block on barrier for async channel
    def barrier(self, timeout=5, baseline=False):
        # set a random key so stale barrier acks are ignored
        key = random.getrandbits(32)
        self.async_barrier = {'key': key, 'ack': False}
        # expiry time
        expr = time.time() + timeout
        while not self.async_barrier['ack']:
            # inject
            rc = self.rpc.transmit("publish_now", params={'key': key, 'baseline': baseline})
            if not rc:
                return rc
            # fast loop
            for i in range(0, 100):
                if self.async_barrier['ack']:
                    break
                time.sleep(0.001)
            if time.time() > expr:
                return RC_ERR("*** [subscriber] - timeout - no data flow from server at : " + self.tr)
        return RC_OK()
# a class to measure util. of async subscriber thread
class AsyncUtil(object):
    """Estimates CPU utilisation and ingress bitrate of the subscriber thread.

    Used as a context manager around the blocking recv(): time spent inside
    the ``with`` counts as sleep, everything else as awake (CPU) time.
    Both figures are exponentially smoothed (0.75 history / 0.25 current).
    """

    STATE_SLEEP = 1
    STATE_AWAKE = 2

    def __init__ (self):
        self.reset()

    def reset (self):
        self.state = self.STATE_AWAKE
        self.clock = time.time()
        # reset the current interval
        self.interval = {'ts': time.time(), 'total_sleep': 0, 'total_bits': 0}
        # global counters
        self.cpu_util = 0
        self.bps = 0

    def on_recv_msg (self, message):
        # Account the received payload toward the current interval's bits.
        self.interval['total_bits'] += len(message) * 8.0
        self._tick()

    def __enter__ (self):
        # Entering the blocking recv: start a sleep period.
        assert(self.state == self.STATE_AWAKE)
        self.state = self.STATE_SLEEP
        self.sleep_start_ts = time.time()

    def __exit__(self, exc_type, exc_val, exc_tb):
        assert(self.state == self.STATE_SLEEP)
        self.state = self.STATE_AWAKE
        # measure total sleep time for interval
        self.interval['total_sleep'] += time.time() - self.sleep_start_ts
        self._tick()

    def _tick (self):
        # how much time did the current interval last; roll over once per second
        ts = time.time() - self.interval['ts']
        if ts < 1:
            return
        # if tick is in the middle of sleep - add the partial sleep and restart it
        if self.state == self.STATE_SLEEP:
            self.interval['total_sleep'] += time.time() - self.sleep_start_ts
            self.sleep_start_ts = time.time()
        # fold the finished interval into the smoothed counters
        if self.interval['total_sleep'] > 0:
            # EMA: 75% history, 25% current interval
            self.cpu_util = self.cpu_util * 0.75 + (float(ts - self.interval['total_sleep']) / ts) * 0.25
            self.interval['total_sleep'] = 0
        if self.interval['total_bits'] > 0:
            # EMA over bits/second
            self.bps = self.bps * 0.75 + ( self.interval['total_bits'] / ts ) * 0.25
            self.interval['total_bits'] = 0
        # reset the interval's clock
        self.interval['ts'] = time.time()

    def get_cpu_util (self):
        # Percentage (0-100) of time the thread was awake.
        self._tick()
        return (self.cpu_util * 100)

    def get_bps (self):
        # Smoothed ingress rate in bits per second.
        self._tick()
        return (self.bps)
|
vacuum.py | #!/usr/bin/python3
#Controlling Ecovacs Deebot vaccum with sucks
from sucks import *
import paho.mqtt.client as paho
import json
import random
import string
import sys
import logging
class DeebotMQTTClient:
def __init__(self, mqtt_config, ecovacs_config):
    """Connect to the MQTT broker, then to the Deebot, then loop forever.

    NOTE: this constructor never returns — it ends in an infinite loop
    that publishes an availability heartbeat roughly every 2 minutes,
    so construct the object as the program's main activity.
    """
    self._connected = False
    # MQTT topics this bridge publishes to / subscribes on.
    self._command_topic = mqtt_config["command_topic"]
    self._send_command_topic = mqtt_config["send_command_topic"]
    self._state_topic = mqtt_config["state_topic"]
    self._set_fan_speed_topic = mqtt_config["set_fan_speed_topic"]
    self._attribute_topic = mqtt_config["json_attributes_topic"]
    self._error_topic = mqtt_config["error_topic"]
    self._availability_topic = mqtt_config["availability_topic"]
    # Random client-id suffix avoids broker-side collisions between runs.
    random_id = ''.join(random.choice(string.ascii_lowercase) for x in range(6))
    self.mqtt_client = paho.Client(client_id="ecovacs-vacuum-mqtt-" + random_id)
    self.mqtt_client.on_connect = self._on_connect
    self.mqtt_client.on_disconnect = self._on_disconnect
    self.mqtt_client.on_message = self._on_message
    self._broker_host = mqtt_config["broker_host"]
    self._broker_port = int(mqtt_config["broker_port"])
    if mqtt_config["username"] != "" and mqtt_config["password"] != "":
        self.mqtt_client.username_pw_set(mqtt_config["username"], mqtt_config["password"])
    logging.info("Connecting to broker: " + self._broker_host + ":" + str(self._broker_port))
    self.mqtt_client.connect(self._broker_host, port=self._broker_port, keepalive=60)
    # logging.info("Starting the loop... ")
    self.mqtt_client.loop_start()
    # Busy-wait until _on_connect flips the flag.
    while self._connected != True:
        logging.info("waiting to be connected to mqtt broker")
        time.sleep(0.1)
    self._connect_to_deebot(ecovacs_config)
    # Heartbeat loop: publish availability every 120 ticks (~2 minutes).
    index = 0
    while True:
        if index % 120 == 0:
            self._publish_availability()
        index = index + 1
        if index >= 1200:
            index = 0
        time.sleep(1)
# --- plain accessors for the configured MQTT topics ---
def get_command_topic(self):
    # Topic HA publishes basic commands to (start/stop/dock).
    return self._command_topic
def get_send_command_topic(self):
    # Topic for raw/advanced vacuum commands.
    return self._send_command_topic
def get_state_topic(self):
    # Topic where the HA-formatted state report is published.
    return self._state_topic
def get_attribute_topic(self):
    # Topic for extra JSON attributes (component life spans etc.).
    return self._attribute_topic
def get_error_topic(self):
    # Topic for error notifications.
    return self._error_topic
def get_fan_speed_topic(self):
    # Topic HA uses to set the fan speed.
    return self._set_fan_speed_topic
def get_availability_topic(self):
    # Topic for the periodic online/offline heartbeat.
    return self._availability_topic
def publish(self, topic, message):
    """JSON-encode *message* and publish it on *topic* (QoS 2, retained)."""
    # retain=True, so if HA restarts, it can read the last vacuum status
    logging.info("Publish message to topic " + topic + ": " + json.dumps(message))
    self.mqtt_client.publish(topic, json.dumps(message), qos=2, retain=True)
def _connect_to_deebot(self, config):
    """Authenticate with the Ecovacs API and attach to the account's vacuum."""
    api = EcoVacsAPI(config['device_id'], config['email'], config['password_hash'], config['country'], config['continent'])
    # NOTE(review): always picks devices()[0] — single-vacuum assumption.
    my_vac = api.devices()[0]
    self.vacbot = VacBot(api.uid, api.REALM, api.resource, api.user_access_token, my_vac, config['continent'], monitor=True, verify_ssl=config['verify_ssl'])
    self._subscribe_events()
    self.vacbot.connect_and_wait_until_ready()
def _subscribe_events(self):
    """Register this bridge's callbacks on the vacbot's event emitters."""
    # Subscribe to the all event emitters
    self.vacbot.batteryEvents.subscribe(self._battery_report)
    self.vacbot.statusEvents.subscribe(self._status_report)
    self.vacbot.lifespanEvents.subscribe(self._lifespan_report)
    self.vacbot.errorEvents.subscribe(self._error_report)
# Callback function for battery events
def _battery_report(self, level):
    """Forward a battery event to HA as a full state report.

    `level` is presumably a 0..1 fraction (scaled to a percentage
    below) — TODO confirm against the library.
    """
    self._publish_ha_state_report({
        "battery_level": int(float(level) * 100),
        "state": self.vacbot.vacuum_status,
        "fan_speed": self.vacbot.fan_speed,
    })
# Callback function for status events (not battery — the original comment
# was a copy/paste of the one above).
def _status_report(self, status):
    """Publish a state report and refresh availability on status change."""
    battery_level = "0"
    # battery_status is presumably a 0..1 fraction; scale to percent.
    if self.vacbot.battery_status != None:
        battery_level = str(float(self.vacbot.battery_status) * 100)
    state_report = {
        "battery_level": int(float(battery_level)),
        "state": str(status),
        "fan_speed": self.vacbot.fan_speed,
    }
    self._publish_ha_state_report(state_report)
    # A status event may also signal going online/offline.
    self._publish_availability()
def _publish_ha_state_report(self, state_report):
    """Remap the vacuum's raw state to an HA-supported one and publish.

    Publishes the (possibly remapped) `state_report` to the state topic,
    then a companion attributes payload (original/clean/charge status plus
    component lifespans as integer percentages) to the attribute topic.
    """
    # State has to be one of vacuum states supported by Home Assistant:
    ha_vacuum_supported_statuses = [
        "cleaning", "docked", "paused", "idle", "returning", "error"
    ]
    state = state_report['state']
    if state not in ha_vacuum_supported_statuses:
        if state == "charging":
            state_report['state'] = "docked"
        elif state == "auto" or state == "spot_area":
            state_report['state'] = "cleaning"
        elif state == "stop":
            state_report['state'] = "idle"
        elif state == "pause":
            state_report['state'] = "paused"
        else:
            # Unmapped states are published as-is; HA may ignore them.
            # (Fixed typo in the log message: "Unknow" -> "Unknown".)
            logging.warning("Unknown HA status: " + state)
    self.publish(self.get_state_topic(), state_report)
    attributes_status = {
        # Keep the pre-remap state so the original value is not lost.
        "original_status": 'charging' if self.vacbot.charge_status == 'charging' else state,
        "clean_status": self.vacbot.clean_status,
        "charge_status": self.vacbot.charge_status
    }
    # Component (brush/filter/...) lifespans as integer percentages.
    for component_type in self.vacbot.components.keys():
        attributes_status[component_type] = str(int(self.vacbot.components[component_type] * 100))
    self.publish(self.get_attribute_topic(), attributes_status)
def _publish_availability(self):
    """Publish "online"/"offline" to the availability topic.

    The vacuum is considered available unless its status is exactly
    "offline".
    """
    payload = "offline" if self.vacbot.vacuum_status == "offline" else "online"
    self.publish(self.get_availability_topic(), payload)
# Callback function for lifespan (components) events
def _lifespan_report(self, lifespan):
    """Publish all component lifespans, substituting the one just reported.

    `lifespan` appears to be {'type': <component>, 'lifespan': <0..1
    fraction>} — TODO confirm against the library.
    """
    lifespan_type = lifespan['type']
    changed_value = str(int(100 * lifespan['lifespan']))
    attributes_status = {
        "original_status": 'charging' if self.vacbot.charge_status == 'charging' else self.vacbot.vacuum_status,
        "clean_status": self.vacbot.clean_status,
        "charge_status": self.vacbot.charge_status
    }
    for component_type in self.vacbot.components.keys():
        if component_type == lifespan_type:
            # Use the freshly reported value; self.vacbot.components may
            # not have been updated yet when this callback fires.
            attributes_status[component_type] = changed_value
        else:
            attributes_status[component_type] = str(int(self.vacbot.components[component_type] * 100))
    self.publish(self.get_attribute_topic(), attributes_status)
# Callback function for error events
# THIS NEEDS A LOT OF WORK
def _error_report(self, error):
    """Publish the raw error payload (stringified) to the error topic."""
    error_str = str(error)
    logging.info("Error: " + error_str)
    self.publish(self.get_error_topic(), error_str)
def _on_message(self, client, userdata, message):
    """paho-mqtt message callback: dispatch HA payloads to the vacuum.

    Routes by topic: the command topic carries the standard HA vacuum
    verbs, the fan-speed topic carries a speed value, and the send-command
    topic carries a spot-area id (or empty string for a whole-home clean).
    """
    payload = message.payload.decode("utf-8").strip()
    logging.info("Message received: " + payload)
    if message.topic == self.get_command_topic():
        if (payload == "turn_on" or payload == "start"):
            logging.info("Clean started...")
            self._threaded_vacbot_run(Clean())
        elif(payload == "pause"):
            logging.info("Pause robot")
            # NOTE(review): mapped to Stop(), same as "stop" — presumably
            # the library has no dedicated pause command; confirm intended.
            self._threaded_vacbot_run(Stop())
        elif(payload == "stop"):
            logging.info("Stop robot")
            self._threaded_vacbot_run(Stop())
        elif(payload == "return_to_base" or payload == "return_home"):
            logging.info("Return to base")
            self._threaded_vacbot_run(Charge())
        elif(payload == "locate"):
            logging.info("Locate robot")
            self._threaded_vacbot_run(PlaySound())
        elif(payload == "clean_spot"):
            logging.info("Clean spot")
            self._threaded_vacbot_run(Spot())
        elif(payload == "edge"):
            logging.info("Clean edge")
            self._threaded_vacbot_run(Edge())
    elif message.topic == self.get_fan_speed_topic():
        # Fan speed is applied by issuing a Clean command with that speed.
        self._threaded_vacbot_run(Clean(speed=payload))
    elif message.topic == self.get_send_command_topic():
        if payload == "":
            logging.info("Clean started for all home")
            self._threaded_vacbot_run(Clean())
        else:
            logging.info("Clean started for area: " + payload)
            self._threaded_vacbot_run(SpotArea(area=payload))
    # Refresh clean/charge state after handling any message so HA stays
    # in sync. (NOTE(review): original indentation was lost in transit;
    # this is assumed to run for every message — confirm.)
    logging.info("Get clean and charge states")
    self._threaded_vacbot_run(GetCleanState())
    self._threaded_vacbot_run(GetChargeState())
def _threaded_vacbot_run(self, command):
    """Run a vacbot command on a background thread.

    Keeps the paho-mqtt network loop responsive while the (potentially
    slow) command executes.
    """
    worker = threading.Thread(target=self.vacbot.run, args=(command,))
    worker.start()
def _on_connect(self, client, obj, flags, rc):
    """paho-mqtt connect callback: subscribe to all inbound topics.

    rc == 0 means the broker accepted the connection; any other value is
    a failure (only logged here — paho keeps retrying via the loop).
    """
    if rc == 0:
        logging.info("Connected to broker")
        # Unblocks the startup wait loop in __init__.
        self._connected = True
        logging.info("OnConnect: subscribing to " + self.get_command_topic())
        self.mqtt_client.subscribe(self.get_command_topic())
        logging.info("OnConnect: subscribing to " + self.get_fan_speed_topic())
        self.mqtt_client.subscribe(self.get_fan_speed_topic())
        logging.info("OnConnect: subscribing to " + self.get_send_command_topic())
        self.mqtt_client.subscribe(self.get_send_command_topic())
    else:
        logging.info("Connection failed")
def _on_disconnect(self, client, userdata, rc):
    """paho-mqtt disconnect callback: immediately attempt a reconnect."""
    logging.warning("Disconnected from MQTT broker. Trying to reconnect")
    self.mqtt_client.reconnect()
def __del__(self):
    """Tear down the MQTT connection when the bridge is destroyed."""
    # NOTE(review): the log text mentions unsubscribing, but the code
    # actually disconnects and stops the network loop.
    logging.info('Destructor called! Unsubscribing from MQTT topic.')
    self.mqtt_client.disconnect()
    self.mqtt_client.loop_stop()
if __name__ == "__main__":
    # Home Assistant add-on style entry point: options are written by the
    # supervisor to /data/options.json.
    options_path = "/data/options.json"
    with open(options_path, encoding='utf-8') as options_file:
        config = json.load(options_file)
    logging_level = logging.INFO
    if 'log_level' in config:
        # getLevelName() returns an int for a known level name and the
        # string "Level <name>" otherwise; the latter fails int() and
        # keeps the INFO default.
        levelnum = logging.getLevelName(config['log_level'].upper())
        try:
            logging_level = int(levelnum)
        except ValueError:
            logging_level = logging.INFO
    # `sys` is presumably imported at the top of the file — verify.
    logging.basicConfig(format='%(asctime)s %(message)s', datefmt='%d/%m/%Y %H:%M:%S', stream=sys.stdout, level=logging_level)
    # Constructor blocks forever in its availability-publishing loop.
    DeebotMQTTClient(config['mqtt'], config['ecovacs'])
|
multidownload_xkcd.py | #! /usr/bin/env python3
# multidownload_xkcd.py - downloads XKCD comics using multiple threads
import os
import re
import threading
from os.path import basename, join
import bs4
import requests
# SGR escape sequences for terminal colouring.
# Fix: '\033[31;m' contained a trailing empty parameter, which terminals
# interpret as 0 (reset) and which therefore cancelled the red.
ANSI_RED = '\033[31m'
ANSI_RESET = '\033[0m'
def download_pages(start_num, end_num, root):
    """
    Downloads all webcomic images from start_num down to end_num and saves
    results to root. Iterates in descending order, so start_num should be
    >= end_num.

    :param int start_num: webcomic number to start downloading from
    :param int end_num: webcomic number to stop downloading at (inclusive)
    :param str root: output path
    """
    if start_num < 1:
        start_num = 1
    # Fix: the final chunk handed out by download_all_comics can have
    # end_num < 1 (e.g. start=50 -> end=-49). The old guard clamped
    # start_num, which is the HIGH end of this descending range, so
    # non-existent comics 0, -1, ... were still requested. Clamp the low
    # end instead.
    if end_num < 1:
        end_num = 1
    for comic_num in range(start_num, end_num - 1, -1):
        print(f"{'Downloading page:':<19} https://xkcd.com/{comic_num}...")
        try:
            res = requests.get(f'https://xkcd.com/{comic_num}')
            res.raise_for_status()
        except requests.exceptions.HTTPError:
            # e.g. comic 404 deliberately does not exist — skip it
            continue
        soup = bs4.BeautifulSoup(res.content, features='lxml')
        comic_element = soup.select('#comic img')
        # find the URL of the comic image
        if not comic_element:
            print(f'\n{ANSI_RED}Could not find comic image.{ANSI_RESET}\n')
        else:
            download_image(comic_element=comic_element, root=root)
def download_image(comic_element, root):
    """
    Download the image from an individual comic entry and save it in root.

    :param list comic_element: matches to the '#comic img' CSS selector
    :param str root: output path
    """
    # The src attribute is protocol-relative ("//imgs.xkcd.com/...").
    comic_url = f'https:{comic_element[0].get("src")}'
    print(f"{'Downloading image:':<19} {comic_url}...")
    try:
        res = requests.get(comic_url)
        res.raise_for_status()
        # save the image locally, streaming in ~100 kB chunks
        with open(join(root, basename(comic_url)), 'wb') as image:
            for chunk in res.iter_content(100_000):
                image.write(chunk)
    except (requests.exceptions.MissingSchema, requests.exceptions.HTTPError):
        # Fix: raise_for_status() raises HTTPError, which previously
        # escaped this handler and killed the worker thread; treat it like
        # a malformed URL and skip the comic. Also fixed the message
        # grammar ("is being skipping" -> "is being skipped").
        print(f'\n{ANSI_RED}An error occurred while downloading the image at {comic_url}'
              f'\n\tThis comic is being skipped and will not be downloaded{ANSI_RESET}\n')
def get_latest_comic_number(comic_num=2101):
    """
    Return the number of the newest comic, scraped from the homepage.

    :param int comic_num: fallback returned if the request fails or the
        marker text is not found
    """
    try:
        homepage = requests.get('https://xkcd.com/')
        homepage.raise_for_status()
    except requests.exceptions.HTTPError:
        return comic_num
    # The homepage embeds a "Permanent link" line naming the current comic.
    match = re.search(r'\nPermanent link to this comic: https://xkcd.com/(\d+)/',
                      homepage.text)
    return int(match.group(1)) if match else comic_num
def download_all_comics(output_path='xkcd_comics'):
    """
    Download every XKCD webcomic, one worker thread per block of 100.

    :param str output_path: path to desired output directory
    """
    # create directory for output
    os.makedirs(output_path, exist_ok=True)
    # spawn a downloader thread for each descending block of 100 comics
    workers = []
    for start in range(get_latest_comic_number(), 0, -100):
        worker = threading.Thread(target=download_pages,
                                  args=(start, start - 99, output_path))
        workers.append(worker)
        worker.start()
    # wait for all threads to end
    for worker in workers:
        worker.join()
    print(f'\nAll successfully downloaded images have been saved to '
          f'{join(output_path, "[filename]")}'
          f'\nDone.')
if __name__ == '__main__':
    # Script entry point: download everything into the default directory.
    download_all_comics()
|
test_gc.py | import unittest
import unittest.mock
from test.support import (verbose, refcount_test,
cpython_only, start_threads,
temp_dir, TESTFN, unlink,
import_module, collect_in_thread)
from test.support.script_helper import assert_python_ok, make_script
import gc
import sys
import sysconfig
import textwrap
import threading
import time
import weakref
try:
    from _testcapi import with_tp_del
except ImportError:
    # Fallback when the _testcapi extension is unavailable: the decorator
    # returns a class whose instantiation fails, so tests that genuinely
    # need tp_del support error out with a clear message.
    def with_tp_del(cls):
        class C(object):
            def __new__(cls, *args, **kwargs):
                raise TypeError('requires _testcapi.with_tp_del')
        return C

try:
    from _testcapi import ContainerNoGC
except ImportError:
    # Tests requiring this extension type are skipped when it is missing.
    ContainerNoGC = None
### Support code
###############################################################################
# Bug 1055820 has several tests of longstanding bugs involving weakrefs and
# cyclic gc.
# An instance of C1055820 has a self-loop, so becomes cyclic trash when
# unreachable.
class C1055820(object):
    def __init__(self, i):
        # `i` tags the instance so tests can tell instances apart.
        self.i = i
        self.loop = self
class GC_Detector(object):
    # Create an instance I. Then gc hasn't happened again so long as
    # I.gc_happened is false.
    def __init__(self):
        self.gc_happened = False
        def it_happened(ignored):
            # Weakref callback: fires when the cyclic trash below is
            # collected, flipping the flag.
            self.gc_happened = True
        # Create a piece of cyclic trash that triggers it_happened when
        # gc collects it.
        self.wr = weakref.ref(C1055820(666), it_happened)
@with_tp_del
class Uncollectable(object):
    """Create a reference cycle with multiple __del__ methods.

    An object in a reference cycle will never have zero references,
    and so must be garbage collected. If one or more objects in the
    cycle have __del__ methods, the gc refuses to guess an order,
    and leaves the cycle uncollected."""
    def __init__(self, partner=None):
        # With no partner given, build one and close the two-object cycle.
        if partner is None:
            self.partner = Uncollectable(partner=self)
        else:
            self.partner = partner
    def __tp_del__(self):
        pass
# True when the interpreter was built with the C assert() macro compiled
# out (i.e. a release build).
if sysconfig.get_config_vars().get('PY_CFLAGS', ''):
    BUILD_WITH_NDEBUG = ('-DNDEBUG' in sysconfig.get_config_vars()['PY_CFLAGS'])
else:
    # Usually, sys.gettotalrefcount() is only present if Python has been
    # compiled in debug mode. If it's missing, expect that Python has
    # been released in release mode: with NDEBUG defined.
    BUILD_WITH_NDEBUG = (not hasattr(sys, 'gettotalrefcount'))
### Tests
###############################################################################
class GCTests(unittest.TestCase):
    """Core collector behaviour: simple cycles of each container kind.

    NOTE: exact gc.collect() return values are implementation-specific
    to this interpreter build.
    """

    def test_list(self):
        # A list containing itself becomes cyclic trash once dropped.
        l = []
        l.append(l)
        gc.collect()
        del l
        self.assertEqual(gc.collect(), 1)

    def test_dict(self):
        # Same idea with a self-referencing dict.
        d = {}
        d[1] = d
        gc.collect()
        del d
        self.assertEqual(gc.collect(), 1)

    def test_tuple(self):
        # since tuples are immutable we close the loop with a list
        l = []
        t = (l,)
        l.append(t)
        gc.collect()
        del t
        del l
        self.assertEqual(gc.collect(), 2)

    def test_class(self):
        # A class referencing itself through an attribute is cyclic.
        class A:
            pass
        A.a = A
        gc.collect()
        del A
        self.assertNotEqual(gc.collect(), 0)

    def test_newstyleclass(self):
        class A(object):
            pass
        gc.collect()
        del A
        self.assertNotEqual(gc.collect(), 0)

    def test_instance(self):
        # Instance -> itself cycle via an attribute.
        class A:
            pass
        a = A()
        a.a = a
        gc.collect()
        del a
        self.assertNotEqual(gc.collect(), 0)
def test_newinstance(self):
    # Cycles through new-style instances, including multiple inheritance
    # with a builtin base, and finally through the classes themselves.
    class A(object):
        pass
    a = A()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    class B(list):
        pass
    class C(B, A):
        pass
    a = C()
    a.a = a
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
    del B, C
    self.assertNotEqual(gc.collect(), 0)
    A.a = A()
    del A
    self.assertNotEqual(gc.collect(), 0)
    # Once everything is gone, a further collection finds nothing.
    self.assertEqual(gc.collect(), 0)
def test_method(self):
    # Tricky: self.__init__ is a bound method, it references the instance.
    class A:
        def __init__(self):
            self.init = self.__init__
    a = A()
    gc.collect()
    del a
    self.assertNotEqual(gc.collect(), 0)
@cpython_only
def test_legacy_finalizer(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage.
    @with_tp_del
    class A:
        def __tp_del__(self): pass
    class B:
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # Break the cycle by hand so the object can finally be reclaimed.
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
@cpython_only
def test_legacy_finalizer_newclass(self):
    # A() is uncollectable if it is part of a cycle, make sure it shows up
    # in gc.garbage. (Same as test_legacy_finalizer, with new-style classes.)
    @with_tp_del
    class A(object):
        def __tp_del__(self): pass
    class B(object):
        pass
    a = A()
    a.a = a
    id_a = id(a)
    b = B()
    b.b = b
    gc.collect()
    del a
    del b
    self.assertNotEqual(gc.collect(), 0)
    # Break the cycle by hand so the object can finally be reclaimed.
    for obj in gc.garbage:
        if id(obj) == id_a:
            del obj.a
            break
    else:
        self.fail("didn't find obj in garbage (finalizer)")
    gc.garbage.remove(obj)
def test_function(self):
    # Tricky: f -> d -> f, code should call d.clear() after the exec to
    # break the cycle.
    d = {}
    exec("def f(): pass\n", d)
    gc.collect()
    del d
    self.assertEqual(gc.collect(), 3)

@refcount_test
def test_frame(self):
    # A frame whose locals reference the frame itself is cyclic trash.
    def f():
        frame = sys._getframe()
        locals = frame.f_locals
    gc.collect()
    f()
    self.assertEqual(gc.collect(), 2)
def test_saveall(self):
    # Verify that cyclic garbage like lists show up in gc.garbage if the
    # SAVEALL option is enabled.

    # First make sure we don't save away other stuff that just happens to
    # be waiting for collection.
    gc.collect()
    # if this fails, someone else created immortal trash
    self.assertEqual(gc.garbage, [])

    L = []
    L.append(L)
    id_L = id(L)

    debug = gc.get_debug()
    gc.set_debug(debug | gc.DEBUG_SAVEALL)
    del L
    gc.collect()
    gc.set_debug(debug)

    self.assertEqual(len(gc.garbage), 1)
    obj = gc.garbage.pop()
    self.assertEqual(id(obj), id_L)
def test_del(self):
    # __del__ methods can trigger collection, make this to happen
    thresholds = gc.get_threshold()
    gc.enable()
    gc.set_threshold(1)

    class A:
        def __del__(self):
            # dir() allocates, which can recursively trigger gc.
            dir(self)
    a = A()
    del a

    gc.disable()
    gc.set_threshold(*thresholds)

def test_del_newclass(self):
    # __del__ methods can trigger collection, make this to happen
    thresholds = gc.get_threshold()
    gc.enable()
    gc.set_threshold(1)

    class A(object):
        def __del__(self):
            dir(self)
    a = A()
    del a

    gc.disable()
    gc.set_threshold(*thresholds)
# The following test is implementation-dependent because it
# counts the number of allocations.
@refcount_test
def test_get_count(self):
    gc.collect()
    a, b, c = gc.get_count()
    # Note: get_count() isn't precise. To enable faster allocations,
    # it is only updated when mimalloc "pages" become full or are no
    # longer full.
    tmp = []
    for _ in range(2048):
        tmp.append([''])
    d, e, f = gc.get_count()
    # This build only reports a first-generation count.
    self.assertEqual((b, c), (0, 0))
    self.assertEqual((e, f), (0, 0))
    # Between the two calls to get_count(), enough objects were
    # created to increase the count.
    self.assertGreater(d, a)
def test_trashcan(self):
    class Ouch:
        n = 0
        def __del__(self):
            Ouch.n = Ouch.n + 1
            if Ouch.n % 17 == 0:
                gc.collect()

    # "trashcan" is a hack to prevent stack overflow when deallocating
    # very deeply nested tuples etc.  It works in part by abusing the
    # type pointer and refcount fields, and that can yield horrible
    # problems when gc tries to traverse the structures.
    # If this test fails (as it does in 2.0, 2.1 and 2.2), it will
    # most likely die via segfault.

    # Note:  In 2.3 the possibility for compiling without cyclic gc was
    # removed, and that in turn allows the trashcan mechanism to work
    # via much simpler means (e.g., it never abuses the type pointer or
    # refcount fields anymore).  Since it's much less likely to cause a
    # problem now, the various constants in this expensive (we force a lot
    # of full collections) test are cut back from the 2.2 version.
    gc.enable()
    N = 150
    for count in range(2):
        t = []
        for i in range(N):
            t = [t, Ouch()]
        u = []
        for i in range(N):
            u = [u, Ouch()]
        v = {}
        for i in range(N):
            v = {1: v, 2: Ouch()}
    gc.disable()
def test_trashcan_threads(self):
    # Issue #13992: trashcan mechanism should be thread-safe
    NESTING = 60
    N_THREADS = 2

    def sleeper_gen():
        """A generator that releases the GIL when closed or dealloc'ed."""
        try:
            yield
        finally:
            time.sleep(0.000001)

    class C(list):
        # Appending to a list is atomic, which avoids the use of a lock.
        inits = []
        dels = []
        def __init__(self, alist):
            self[:] = alist
            C.inits.append(None)
        def __del__(self):
            # This __del__ is called by subtype_dealloc().
            C.dels.append(None)
            # `g` will release the GIL when garbage-collected.  This
            # helps assert subtype_dealloc's behaviour when threads
            # switch in the middle of it.
            g = sleeper_gen()
            next(g)
            # Now that __del__ is finished, subtype_dealloc will proceed
            # to call list_dealloc, which also uses the trashcan mechanism.

    def make_nested():
        """Create a sufficiently nested container object so that the
        trashcan mechanism is invoked when deallocating it."""
        x = C([])
        for i in range(NESTING):
            x = [C([x])]
        del x

    def run_thread():
        """Exercise make_nested() in a loop."""
        while not exit:
            make_nested()

    old_switchinterval = sys.getswitchinterval()
    sys.setswitchinterval(1e-5)
    try:
        exit = []
        threads = []
        for i in range(N_THREADS):
            t = threading.Thread(target=run_thread)
            threads.append(t)
        with start_threads(threads, lambda: exit.append(1)):
            time.sleep(1.0)
    finally:
        sys.setswitchinterval(old_switchinterval)
    gc.collect()
    # Every constructed C must also have been destroyed.
    self.assertEqual(len(C.inits), len(C.dels))
def test_gc_critical_lock(self):
    # Mutate a dict of hashable frozen-dataclass keys while another
    # thread repeatedly collects; must not deadlock or crash.
    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Key:
        value: int

    d = {}
    for i in range(1000):
        d[Key(i)] = i
    with collect_in_thread():
        for _ in range(10):
            for i in range(1000):
                d[Key(i)] = i * 10
def test_boom(self):
    class Boom:
        def __getattr__(self, someattribute):
            del self.attr
            raise AttributeError

    a = Boom()
    b = Boom()
    a.attr = b
    b.attr = a

    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    # a<->b are in a trash cycle now.  Collection will invoke
    # Boom.__getattr__ (to see whether a and b have __del__ methods), and
    # __getattr__ deletes the internal "attr" attributes as a side effect.
    # That causes the trash cycle to get reclaimed via refcounts falling to
    # 0, thus mutating the trash graph as a side effect of merely asking
    # whether __del__ exists.  This used to (before 2.3b1) crash Python.
    # Now __getattr__ isn't called.
    self.assertEqual(gc.collect(), 4)
    self.assertEqual(len(gc.garbage), garbagelen)

def test_boom2(self):
    class Boom2:
        def __init__(self):
            self.x = 0

        def __getattr__(self, someattribute):
            self.x += 1
            if self.x > 1:
                del self.attr
            raise AttributeError

    a = Boom2()
    b = Boom2()
    a.attr = b
    b.attr = a

    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    # Much like test_boom(), except that __getattr__ doesn't break the
    # cycle until the second time gc checks for __del__.  As of 2.3b1,
    # there isn't a second time, so this simply cleans up the trash cycle.
    # We expect a, b, a.__dict__ and b.__dict__ (4 objects) to get
    # reclaimed this way.
    self.assertEqual(gc.collect(), 4)
    self.assertEqual(len(gc.garbage), garbagelen)
def test_boom_new(self):
    # boom__new and boom2_new are exactly like boom and boom2, except use
    # new-style classes.

    class Boom_New(object):
        def __getattr__(self, someattribute):
            del self.attr
            raise AttributeError

    a = Boom_New()
    b = Boom_New()
    a.attr = b
    b.attr = a

    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    self.assertEqual(gc.collect(), 4)
    self.assertEqual(len(gc.garbage), garbagelen)

def test_boom2_new(self):
    class Boom2_New(object):
        def __init__(self):
            self.x = 0

        def __getattr__(self, someattribute):
            self.x += 1
            if self.x > 1:
                del self.attr
            raise AttributeError

    a = Boom2_New()
    b = Boom2_New()
    a.attr = b
    b.attr = a

    gc.collect()
    garbagelen = len(gc.garbage)
    del a, b
    self.assertEqual(gc.collect(), 4)
    self.assertEqual(len(gc.garbage), garbagelen)
def test_get_referents(self):
    # get_referents() returns the objects directly referenced by its
    # arguments; order is unspecified, so results are sorted first.
    alist = [1, 3, 5]
    got = gc.get_referents(alist)
    got.sort()
    self.assertEqual(got, alist)

    atuple = tuple(alist)
    got = gc.get_referents(atuple)
    got.sort()
    self.assertEqual(got, alist)

    adict = {1: 3, 5: 7}
    expected = [1, 3, 5, 7]
    got = gc.get_referents(adict)
    got.sort()
    self.assertEqual(got, expected)

    got = gc.get_referents([1, 2], {3: 4}, (0, 0, 0))
    got.sort()
    self.assertEqual(got, [0, 0] + list(range(5)))

    # Atomic objects have no referents at all.
    self.assertEqual(gc.get_referents(1, 'a', 4j), [])
def test_is_tracked(self):
    # Atomic built-in types are not tracked, user-defined objects and
    # mutable containers are.
    # NOTE: types with special optimizations (e.g. tuple) have tests
    # in their own test files instead.
    self.assertFalse(gc.is_tracked(None))
    self.assertFalse(gc.is_tracked(1))
    self.assertFalse(gc.is_tracked(1.0))
    self.assertFalse(gc.is_tracked(1.0 + 5.0j))
    self.assertFalse(gc.is_tracked(True))
    self.assertFalse(gc.is_tracked(False))
    self.assertFalse(gc.is_tracked(b"a"))
    self.assertFalse(gc.is_tracked("a"))
    self.assertFalse(gc.is_tracked(bytearray(b"a")))
    self.assertFalse(gc.is_tracked(type))
    self.assertFalse(gc.is_tracked(int))
    self.assertFalse(gc.is_tracked(object))
    self.assertFalse(gc.is_tracked(object()))

    class UserClass:
        pass

    class UserInt(int):
        pass

    # Base class is object; no extra fields.
    class UserClassSlots:
        __slots__ = ()

    # Base class is fixed size larger than object; no extra fields.
    class UserFloatSlots(float):
        __slots__ = ()

    # Base class is variable size; no extra fields.
    class UserIntSlots(int):
        __slots__ = ()

    self.assertTrue(gc.is_tracked(gc))
    self.assertTrue(gc.is_tracked(UserClass))
    self.assertTrue(gc.is_tracked(UserClass()))
    self.assertTrue(gc.is_tracked(UserInt()))
    self.assertTrue(gc.is_tracked([]))
    self.assertTrue(gc.is_tracked(set()))
    self.assertTrue(gc.is_tracked(UserClassSlots()))
    self.assertTrue(gc.is_tracked(UserFloatSlots()))
    self.assertTrue(gc.is_tracked(UserIntSlots()))
def test_is_finalized(self):
    # Objects not tracked by the always gc return false
    self.assertFalse(gc.is_finalized(3))

    storage = []
    class Lazarus:
        def __del__(self):
            # Resurrect by stashing a reference.
            storage.append(self)

    lazarus = Lazarus()
    self.assertFalse(gc.is_finalized(lazarus))

    # A resurrected object is still marked as having been finalized.
    del lazarus
    gc.collect()

    lazarus = storage.pop()
    self.assertTrue(gc.is_finalized(lazarus))
def test_bug1055820b(self):
    # Corresponds to temp2b.py in the bug report.

    ouch = []
    def callback(ignored):
        # Weakref callback: snapshot what the weakrefs now resolve to.
        ouch[:] = [wr() for wr in WRs]

    Cs = [C1055820(i) for i in range(2)]
    WRs = [weakref.ref(c, callback) for c in Cs]
    c = None

    gc.collect()
    self.assertEqual(len(ouch), 0)
    # Make the two instances trash, and collect again.  The bug was that
    # the callback materialized a strong reference to an instance, but gc
    # cleared the instance's dict anyway.
    Cs = None
    gc.collect()
    self.assertEqual(len(ouch), 2)  # else the callbacks didn't run
    for x in ouch:
        # If the callback resurrected one of these guys, the instance
        # would be damaged, with an empty __dict__.
        self.assertEqual(x, None)
def test_bug21435(self):
    # This is a poor test - its only virtue is that it happened to
    # segfault on Tim's Windows box before the patch for 21435 was
    # applied.  That's a nasty bug relying on specific pieces of cyclic
    # trash appearing in exactly the right order in finalize_garbage()'s
    # input list.
    # But there's no reliable way to force that order from Python code,
    # so over time chances are good this test won't really be testing much
    # of anything anymore.  Still, if it blows up, there's _some_
    # problem ;-)
    gc.collect()

    class A:
        pass

    class B:
        def __init__(self, x):
            self.x = x

        def __del__(self):
            self.attr = None

    def do_work():
        a = A()
        b = B(A())

        a.attr = b
        b.attr = a

    do_work()
    gc.collect() # this blows up (bad C pointer) when it fails
@cpython_only
def test_garbage_at_shutdown(self):
    # Uncollectable objects remaining at interpreter shutdown should emit
    # a ResourceWarning, whose verbosity follows the gc debug flags.
    import subprocess
    code = """if 1:
        import gc
        import _testcapi
        @_testcapi.with_tp_del
        class X:
            def __init__(self, name):
                self.name = name
            def __repr__(self):
                return "<X %%r>" %% self.name
            def __tp_del__(self):
                pass

        x = X('first')
        x.x = x
        x.y = X('second')
        del x
        gc.set_debug(%s)
    """
    def run_command(code):
        p = subprocess.Popen([sys.executable, "-Wd", "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        p.stdout.close()
        p.stderr.close()
        self.assertEqual(p.returncode, 0)
        self.assertEqual(stdout, b"")
        return stderr

    stderr = run_command(code % "0")
    self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                  b"shutdown; use", stderr)
    self.assertNotIn(b"<X 'first'>", stderr)
    # With DEBUG_UNCOLLECTABLE, the garbage list gets printed
    stderr = run_command(code % "gc.DEBUG_UNCOLLECTABLE")
    self.assertIn(b"ResourceWarning: gc: 2 uncollectable objects at "
                  b"shutdown", stderr)
    self.assertTrue(
        (b"[<X 'first'>, <X 'second'>]" in stderr) or
        (b"[<X 'second'>, <X 'first'>]" in stderr), stderr)
    # With DEBUG_SAVEALL, no additional message should get printed
    # (because gc.garbage also contains normally reclaimable cyclic
    # references, and its elements get printed at runtime anyway).
    stderr = run_command(code % "gc.DEBUG_SAVEALL")
    self.assertNotIn(b"uncollectable objects at shutdown", stderr)
def test_gc_main_module_at_shutdown(self):
    # Create a reference cycle through the __main__ module and check
    # it gets collected at interpreter shutdown.
    code = """if 1:
        class C:
            def __del__(self):
                print('__del__ called')
        l = [C()]
        l.append(l)
        """
    rc, out, err = assert_python_ok('-c', code)
    self.assertEqual(out.strip(), b'__del__ called')

def test_gc_ordinary_module_at_shutdown(self):
    # Same as above, but with a non-__main__ module.
    with temp_dir() as script_dir:
        module = """if 1:
            class C:
                def __del__(self):
                    print('__del__ called')
            l = [C()]
            l.append(l)
            """
        code = """if 1:
            import sys
            sys.path.insert(0, %r)
            import gctest
            """ % (script_dir,)
        make_script(script_dir, 'gctest', module)
        rc, out, err = assert_python_ok('-c', code)
        self.assertEqual(out.strip(), b'__del__ called')
def test_global_del_SystemExit(self):
    # A module-global cycle with __del__ must still be finalized when the
    # interpreter exits via SystemExit.
    code = """if 1:
        class ClassWithDel:
            def __del__(self):
                print('__del__ called')
        a = ClassWithDel()
        a.link = a
        raise SystemExit(0)"""
    self.addCleanup(unlink, TESTFN)
    with open(TESTFN, 'w') as script:
        script.write(code)
    rc, out, err = assert_python_ok(TESTFN)
    self.assertEqual(out.strip(), b'__del__ called')
def test_get_stats(self):
    # This build reports a single generation's stats.
    stats = gc.get_stats()
    self.assertEqual(len(stats), 1)
    for st in stats:
        self.assertIsInstance(st, dict)
        self.assertEqual(set(st),
                         {"collected", "collections", "uncollectable"})
        self.assertGreaterEqual(st["collected"], 0)
        self.assertGreaterEqual(st["collections"], 0)
        self.assertGreaterEqual(st["uncollectable"], 0)
    # Check that collection counts are incremented correctly
    if gc.isenabled():
        self.addCleanup(gc.enable)
        gc.disable()
    old = gc.get_stats()
    gc.collect()
    new = gc.get_stats()
    self.assertEqual(new[0]["collections"], old[0]["collections"] + 1)

def test_freeze(self):
    # freeze no longer does anything, so count is always zero :(
    gc.freeze()
    self.assertEqual(gc.get_freeze_count(), 0)
    gc.unfreeze()
    self.assertEqual(gc.get_freeze_count(), 0)
def test_get_objects(self):
    # A live tracked object must appear in get_objects(), before and
    # after a collection.
    gc.collect()
    l = []
    l.append(l)
    self.assertTrue(
        any(l is element for element in gc.get_objects())
    )
    gc.collect()
    self.assertTrue(
        any(l is element for element in gc.get_objects())
    )
    del l
    gc.collect()

def test_get_objects_arguments(self):
    # This build rejects any generation argument to get_objects().
    self.assertGreater(len(gc.get_objects()), 0)
    self.assertRaises(ValueError, gc.get_objects, 1000)
    self.assertRaises(ValueError, gc.get_objects, -1000)
    self.assertRaises(TypeError, gc.get_objects, "1")
    self.assertRaises(TypeError, gc.get_objects, 1.234)
def test_resurrection_only_happens_once_per_object(self):
    class A:  # simple self-loop
        def __init__(self):
            self.me = self

    class Lazarus(A):
        resurrected = 0
        resurrected_instances = []

        def __del__(self):
            Lazarus.resurrected += 1
            Lazarus.resurrected_instances.append(self)

    gc.collect()
    gc.disable()

    # We start with 0 resurrections
    laz = Lazarus()
    self.assertEqual(Lazarus.resurrected, 0)

    # Deleting the instance and triggering a collection
    # resurrects the object
    del laz
    gc.collect()
    self.assertEqual(Lazarus.resurrected, 1)
    self.assertEqual(len(Lazarus.resurrected_instances), 1)

    # Clearing the references and forcing a collection
    # should not resurrect the object again.
    Lazarus.resurrected_instances.clear()
    self.assertEqual(Lazarus.resurrected, 1)
    gc.collect()
    self.assertEqual(Lazarus.resurrected, 1)

    gc.enable()

def test_resurrection_is_transitive(self):
    class Cargo:
        def __init__(self):
            self.me = self

    class Lazarus:
        resurrected_instances = []

        def __del__(self):
            Lazarus.resurrected_instances.append(self)

    gc.collect()
    gc.disable()

    laz = Lazarus()
    cargo = Cargo()
    cargo_id = id(cargo)

    # Create a cycle between cargo and laz
    laz.cargo = cargo
    cargo.laz = laz

    # Drop the references, force a collection and check that
    # everything was resurrected.
    del laz, cargo
    gc.collect()
    self.assertEqual(len(Lazarus.resurrected_instances), 1)
    instance = Lazarus.resurrected_instances.pop()
    self.assertTrue(hasattr(instance, "cargo"))
    self.assertEqual(id(instance.cargo), cargo_id)

    gc.collect()
    gc.enable()
def test_resurrection_does_not_block_cleanup_of_other_objects(self):
    # When a finalizer resurrects objects, stats were reporting them as
    # having been collected.  This affected both collect()'s return
    # value and the dicts returned by get_stats().
    N = 100

    class A:  # simple self-loop
        def __init__(self):
            self.me = self

    class Z(A):  # resurrecting __del__
        def __del__(self):
            zs.append(self)

    zs = []

    def getstats():
        d = gc.get_stats()[-1]
        return d['collected'], d['uncollectable']

    gc.collect()
    gc.disable()

    # No problems if just collecting A() instances.
    oldc, oldnc = getstats()
    for i in range(N):
        A()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 2*N)  # instance object & its dict
    self.assertEqual(c - oldc, 2*N)
    self.assertEqual(nc - oldnc, 0)

    # But Z() is not actually collected.
    oldc, oldnc = c, nc
    Z()
    # Nothing is collected - Z() is merely resurrected.
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 0)
    self.assertEqual(c - oldc, 0)
    self.assertEqual(nc - oldnc, 0)

    # Z() should not prevent anything else from being collected.
    oldc, oldnc = c, nc
    for i in range(N):
        A()
    Z()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 2*N)
    self.assertEqual(c - oldc, 2*N)
    self.assertEqual(nc - oldnc, 0)

    # The A() trash should have been reclaimed already but the
    # 2 copies of Z are still in zs (and the associated dicts).
    oldc, oldnc = c, nc
    zs.clear()
    t = gc.collect()
    c, nc = getstats()
    self.assertEqual(t, 4)
    self.assertEqual(c - oldc, 4)
    self.assertEqual(nc - oldnc, 0)

    gc.enable()
@unittest.skipIf(ContainerNoGC is None,
                 'requires ContainerNoGC extension type')
def test_trash_weakref_clear(self):
    # Test that trash weakrefs are properly cleared (bpo-38006).
    #
    # Structure we are creating:
    #
    #   Z <- Y <- A--+--> WZ -> C
    #               ^            |
    #               +--+
    # where:
    #   WZ is a weakref to Z with callback C
    #   Y doesn't implement tp_traverse
    #   A contains a reference to itself, Y and WZ
    #
    # A, Y, Z, WZ are all trash.  The GC doesn't know that Z is trash
    # because Y does not implement tp_traverse.  To show the bug, WZ needs
    # to live long enough so that Z is deallocated before it.  Then, if
    # gcmodule is buggy, when Z is being deallocated, C will run.
    #
    # To ensure WZ lives long enough, we put it in a second reference
    # cycle.  That trick only works due to the ordering of the GC prev/next
    # linked lists.  So, this test is a bit fragile.
    #
    # The bug reported in bpo-38006 is caused because the GC did not
    # clear WZ before starting the process of calling tp_clear on the
    # trash.  Normally, handle_weakrefs() would find the weakref via Z and
    # clear it.  However, since the GC cannot find Z, WR is not cleared and
    # it can execute during delete_garbage().  That can lead to disaster
    # since the callback might tinker with objects that have already had
    # tp_clear called on them (leaving them in possibly invalid states).

    callback = unittest.mock.Mock()

    class A:
        __slots__ = ['a', 'y', 'wz']

    class Z:
        pass

    # setup required object graph, as described above
    a = A()
    a.a = a
    a.y = ContainerNoGC(Z())
    a.wz = weakref.ref(a.y.value, callback)
    # create second cycle to keep WZ alive longer
    wr_cycle = [a.wz]
    wr_cycle.append(wr_cycle)
    # ensure trash unrelated to this test is gone
    gc.collect()
    gc.disable()
    # release references and create trash
    del a, wr_cycle
    gc.collect()
    # if called, it means there is a bug in the GC.  The weakref should be
    # cleared before Z dies.
    callback.assert_not_called()
    gc.enable()
class GCCallbackTests(unittest.TestCase):
    """Tests for the gc.callbacks hook list.

    Two callbacks (cb1/cb2) record each invocation as an
    (index, phase, info) tuple in self.visit so tests can assert on
    exactly what the collector reported.
    """

    def setUp(self):
        # Save gc state and disable it.
        self.enabled = gc.isenabled()
        gc.disable()
        self.debug = gc.get_debug()
        gc.set_debug(0)
        gc.callbacks.append(self.cb1)
        gc.callbacks.append(self.cb2)
        self.othergarbage = []

    def tearDown(self):
        # Restore gc state
        del self.visit
        gc.callbacks.remove(self.cb1)
        gc.callbacks.remove(self.cb2)
        gc.set_debug(self.debug)
        if self.enabled:
            gc.enable()
        # destroy any uncollectables
        gc.collect()
        for obj in gc.garbage:
            if isinstance(obj, Uncollectable):
                obj.partner = None
        del gc.garbage[:]
        del self.othergarbage
        gc.collect()

    def preclean(self):
        # Remove all fluff from the system.  Invoke this function
        # manually rather than through self.setUp() for maximum
        # safety.
        self.visit = []
        gc.collect()
        # Park pre-existing garbage in othergarbage so it cannot skew
        # the per-test collected/uncollectable counts.
        garbage, gc.garbage[:] = gc.garbage[:], []
        self.othergarbage.append(garbage)
        self.visit = []

    def cb1(self, phase, info):
        # First recorder: only logs the call.
        self.visit.append((1, phase, dict(info)))

    def cb2(self, phase, info):
        # Second recorder: additionally (when self.cleanup is set) breaks
        # the Uncollectable partner cycles left in gc.garbage.
        self.visit.append((2, phase, dict(info)))
        if phase == "stop" and hasattr(self, "cleanup"):
            # Clean Uncollectable from garbage
            uc = [e for e in gc.garbage if isinstance(e, Uncollectable)]
            gc.garbage[:] = [e for e in gc.garbage
                             if not isinstance(e, Uncollectable)]
            for e in uc:
                e.partner = None

    def test_collect(self):
        self.preclean()
        gc.collect()
        # Algorithmically verify the contents of self.visit
        # because it is long and tortuous.

        # Count the number of visits to each callback
        n = [v[0] for v in self.visit]
        n1 = [i for i in n if i == 1]
        n2 = [i for i in n if i == 2]
        self.assertEqual(n1, [1]*2)
        self.assertEqual(n2, [2]*2)

        # Count that we got the right number of start and stop callbacks.
        n = [v[1] for v in self.visit]
        n1 = [i for i in n if i == "start"]
        n2 = [i for i in n if i == "stop"]
        self.assertEqual(n1, ["start"]*2)
        self.assertEqual(n2, ["stop"]*2)

        # Check that we got the right info dict for all callbacks
        for v in self.visit:
            info = v[2]
            self.assertTrue("generation" in info)
            self.assertTrue("collected" in info)
            self.assertTrue("uncollectable" in info)

    def test_collect_generation(self):
        self.preclean()
        gc.collect(0)
        # Every callback invocation must report the generation requested.
        for v in self.visit:
            info = v[2]
            self.assertEqual(info["generation"], 0)

    @cpython_only
    def test_collect_garbage(self):
        self.preclean()
        # Each of these cause four objects to be garbage: Two
        # Uncollectables and their instance dicts.
        Uncollectable()
        Uncollectable()
        C1055820(666)
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 2)
            self.assertEqual(info["uncollectable"], 8)

        # We should now have the Uncollectables in gc.garbage
        self.assertEqual(len(gc.garbage), 4)
        for e in gc.garbage:
            self.assertIsInstance(e, Uncollectable)

        # Now, let our callback handle the Uncollectable instances
        self.cleanup = True
        self.visit = []
        gc.garbage[:] = []
        gc.collect()
        for v in self.visit:
            if v[1] != "stop":
                continue
            info = v[2]
            self.assertEqual(info["collected"], 0)
            self.assertEqual(info["uncollectable"], 4)

        # Uncollectables should be gone
        self.assertEqual(len(gc.garbage), 0)

    @unittest.skipIf(BUILD_WITH_NDEBUG,
                     'built with -NDEBUG')
    def test_refcount_errors(self):
        self.preclean()
        # Verify the "handling" of objects with broken refcounts

        # Skip the test if ctypes is not available
        import_module("ctypes")

        import subprocess
        code = textwrap.dedent('''
            from test.support import gc_collect, SuppressCrashReport
            a = [1, 2, 3]
            b = [a]
            # Avoid coredump when Py_FatalError() calls abort()
            SuppressCrashReport().__enter__()
            # Simulate the refcount of "a" being too low (compared to the
            # references held on it by live data), but keeping it above zero
            # (to avoid deallocating it):
            import ctypes
            ctypes.pythonapi.Py_DecRef(ctypes.py_object(a))
            # The garbage collector should now have a fatal error
            # when it reaches the broken object
            gc_collect()
        ''')
        p = subprocess.Popen([sys.executable, "-c", code],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        stdout, stderr = p.communicate()
        p.stdout.close()
        p.stderr.close()
        # Verify that stderr has a useful error message:
        self.assertRegex(stderr,
            br'gcmodule\.c:[0-9]+: .*Assertion "gc_get_refs\(gc\) >= 0" failed.')
        self.assertRegex(stderr,
            br'refcount is too small')
        # "address : 0x7fb5062efc18"
        # "address : 7FB5062EFC18"
        address_regex = br'[0-9a-fA-Fx]+'
        self.assertRegex(stderr,
            br'object address  : ' + address_regex)
        self.assertRegex(stderr,
            br'object refcount : 1')
        self.assertRegex(stderr,
            br'object type     : ' + address_regex)
        self.assertRegex(stderr,
            br'object type name: list')
        self.assertRegex(stderr,
            br'object repr     : \[1, 2, 3\]')
class GCTogglingTests(unittest.TestCase):
    """Tests that need automatic collection enabled (bug 1055820)."""

    def setUp(self):
        gc.enable()

    def tearDown(self):
        gc.disable()

    def test_bug1055820c(self):
        # Corresponds to temp2c.py in the bug report.  This is pretty
        # elaborate.

        c0 = C1055820(0)
        # Move c0 into generation 2.
        gc.collect()

        c1 = C1055820(1)
        c1.keep_c0_alive = c0
        del c0.loop # now only c1 keeps c0 alive

        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!

        ouch = []
        def callback(ignored):
            ouch[:] = [c2wr()]

        # The callback gets associated with a wr on an object in generation 2.
        c0wr = weakref.ref(c0, callback)

        c0 = c1 = c2 = None

        # What we've set up:  c0, c1, and c2 are all trash now.  c0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it.  c1 and c2 are in generation 0, and are in self-loops.  There's
        # a global weakref to c2 (c2wr), but that weakref has no callback.
        # There's also a global weakref to c0 (c0wr), and that does have a
        # callback, and that callback references c2 via c2wr().
        #
        #               c0 has a wr with callback, which references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see c0 at all, and c0 is
        # the only object that has a weakref with a callback.  gc clears c1
        # and c2.  Clearing c1 has the side effect of dropping the refcount on
        # c0 to 0, so c0 goes away (despite that it's in an older generation)
        # and c0's wr callback triggers.  That in turn materializes a reference
        # to c2 via c2wr(), but c2 gets cleared anyway by gc.

        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        # TODO(sgross): revisit this. no guaranteed "natural" collection, so trigger
        # a collection manually.
        gc.collect()

        self.assertEqual(len(ouch), 1)  # else the callback wasn't invoked
        for x in ouch:
            # If the callback resurrected c2, the instance would be damaged,
            # with an empty __dict__.
            self.assertEqual(x, None)

    def test_bug1055820d(self):
        # Corresponds to temp2d.py in the bug report.  This is very much like
        # test_bug1055820c, but uses a __del__ method instead of a weakref
        # callback to sneak in a resurrection of cyclic trash.

        ouch = []
        class D(C1055820):
            def __del__(self):
                ouch[:] = [c2wr()]

        d0 = D(0)
        # Move all the above into generation 2.
        gc.collect()

        c1 = C1055820(1)
        c1.keep_d0_alive = d0
        del d0.loop # now only c1 keeps d0 alive

        c2 = C1055820(2)
        c2wr = weakref.ref(c2) # no callback!

        d0 = c1 = c2 = None

        # What we've set up:  d0, c1, and c2 are all trash now.  d0 is in
        # generation 2.  The only thing keeping it alive is that c1 points to
        # it.  c1 and c2 are in generation 0, and are in self-loops.  There's
        # a global weakref to c2 (c2wr), but that weakref has no callback.
        # There are no other weakrefs.
        #
        #               d0 has a __del__ method that references c2wr
        #               ^
        #               |
        #               |     Generation 2 above dots
        #. . . . . . . .|. . . . . . . . . . . . . . . . . . . . . . . .
        #               |     Generation 0 below dots
        #               |
        #               |
        #            ^->c1   ^->c2 has a wr but no callback
        #            |  |    |  |
        #            <--v    <--v
        #
        # So this is the nightmare:  when generation 0 gets collected, we see
        # that c2 has a callback-free weakref, and c1 doesn't even have a
        # weakref.  Collecting generation 0 doesn't see d0 at all.  gc clears
        # c1 and c2.  Clearing c1 has the side effect of dropping the refcount
        # on d0 to 0, so d0 goes away (despite that it's in an older
        # generation) and d0's __del__ triggers.  That in turn materializes
        # a reference to c2 via c2wr(), but c2 gets cleared anyway by gc.

        # We want to let gc happen "naturally", to preserve the distinction
        # between generations.
        # TODO(sgross): revisit this. no guaranteed "natural" collection, so trigger
        # a collection manually.
        gc.collect()

        self.assertEqual(len(ouch), 1)  # else __del__ wasn't invoked
        for x in ouch:
            # If __del__ resurrected c2, the instance would be damaged, with an
            # empty __dict__.
            self.assertEqual(x, None)
class PythonFinalizationTests(unittest.TestCase):
    """Checks run in a subprocess exercising interpreter shutdown."""

    def test_ast_fini(self):
        # bpo-44184: Regression test for subtype_dealloc() when deallocating
        # an AST instance also destroy its AST type: subtype_dealloc() must
        # not access the type memory after deallocating the instance, since
        # the type memory can be freed as well. The test is also related to
        # _PyAST_Fini() which clears references to AST types.
        code = textwrap.dedent("""
            import ast
            import codecs
            # Small AST tree to keep their AST types alive
            tree = ast.parse("def f(x, y): return 2*x-y")
            x = [tree]
            x.append(x)
            # Put the cycle somewhere to survive until the last GC collection.
            # Codec search functions are only cleared at the end of
            # interpreter_clear().
            def search_func(encoding):
                return None
            search_func.a = x
            codecs.register(search_func)
        """)
        # The subprocess must exit cleanly (no crash during finalization).
        assert_python_ok("-c", code)
def setUpModule():
    """Snapshot gc state, then switch automatic collection off for the run."""
    global enabled, debug
    enabled = gc.isenabled()
    gc.disable()
    assert not gc.isenabled()
    debug = gc.get_debug()
    # These tests leak on purpose, so do not fail them under DEBUG_LEAK.
    gc.set_debug(debug & ~gc.DEBUG_LEAK)
    # Flush any pre-existing second-generation garbage up front.
    gc.collect()
def tearDownModule():
    """Restore the gc configuration captured by setUpModule()."""
    gc.set_debug(debug)
    # test gc.enable() even if GC is disabled by default
    if verbose:
        print("restoring automatic collection")
    # make sure to always test gc.enable()
    gc.enable()
    assert gc.isenabled()
    if not enabled:
        gc.disable()
# Allow running this test module directly (outside the regrtest driver).
if __name__ == "__main__":
    unittest.main()
|
utils.py | """Utilities shared by tests."""
import collections
import contextlib
import io
import logging
import os
import re
import selectors
import socket
import socketserver
import sys
import tempfile
import threading
import time
import unittest
import weakref
from unittest import mock
from http.server import HTTPServer
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
try:
import ssl
except ImportError: # pragma: no cover
ssl = None
from asyncio import base_events
from asyncio import events
from asyncio import format_helpers
from asyncio import futures
from asyncio import tasks
from asyncio.log import logger
from test import support
def data_file(filename):
    """Locate *filename* among the test data directories.

    Searches ``support.TEST_HOME_DIR`` (when the running stdlib exposes
    it) and then the directory containing this module.  Raises
    FileNotFoundError when the file exists in neither place.
    """
    candidates = []
    if hasattr(support, 'TEST_HOME_DIR'):
        candidates.append(os.path.join(support.TEST_HOME_DIR, filename))
    candidates.append(os.path.join(os.path.dirname(__file__), filename))
    for candidate in candidates:
        if os.path.isfile(candidate):
            return candidate
    raise FileNotFoundError(filename)
# Certificate/key fixture files shipped alongside the test suite.
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
# Decoded peer-certificate dict corresponding to SIGNED_CERTFILE, in the
# format returned by SSLSocket.getpeercert(); tests compare against it.
PEERCERT = {
    'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
    'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
    'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
    'issuer': ((('countryName', 'XY'),),
               (('organizationName', 'Python Software Foundation CA'),),
               (('commonName', 'our-ca-server'),)),
    'notAfter': 'Nov 28 19:09:06 2027 GMT',
    'notBefore': 'Jan 19 19:09:06 2018 GMT',
    'serialNumber': '82EDBF41C880919C',
    'subject': ((('countryName', 'XY'),),
                (('localityName', 'Castle Anthrax'),),
                (('organizationName', 'Python Software Foundation'),),
                (('commonName', 'localhost'),)),
    'subjectAltName': (('DNS', 'localhost'),),
    'version': 3
}
def simple_server_sslcontext():
    """Build a server-side TLS context loaded with the test cert/key."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    ctx.load_cert_chain(ONLYCERT, ONLYKEY)
    # The test certificate is self-signed; skip all verification.
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE
    return ctx
def simple_client_sslcontext(*, disable_verify=True):
    """Build a client-side TLS context; certificate checks are off by default."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False
    if disable_verify:
        ctx.verify_mode = ssl.CERT_NONE
    return ctx
def dummy_ssl_context():
    """Return a generic TLS context, or None when ssl is unavailable."""
    return None if ssl is None else ssl.SSLContext(ssl.PROTOCOL_TLS)
def run_briefly(loop):
    """Spin *loop* just long enough to complete one trivial coroutine."""
    async def once():
        pass
    coro = once()
    task = loop.create_task(coro)
    # Don't log a warning if the task is not done after run_until_complete().
    # It occurs if the loop is stopped or if a task raises a BaseException.
    task._log_destroy_pending = False
    try:
        loop.run_until_complete(task)
    finally:
        coro.close()
def run_until(loop, pred, timeout=30):
    """Run *loop* until ``pred()`` becomes true or *timeout* seconds elapse.

    Args:
        loop: event loop to drive (via run_until_complete on short sleeps).
        pred: zero-argument callable; polled after every loop turn.
        timeout: overall deadline in seconds.

    Raises:
        futures.TimeoutError: if the deadline passes before pred() holds.
    """
    deadline = time.time() + timeout
    while not pred():
        timeout = deadline - time.time()
        if timeout <= 0:
            raise futures.TimeoutError()
        # Bug fix: asyncio.sleep() lost its `loop=` parameter in Python 3.10;
        # run_until_complete() already binds the coroutine to `loop`.
        loop.run_until_complete(tasks.sleep(0.001))
def run_once(loop):
    """Legacy helper: run a single pass through the event loop.

    Schedules ``loop.stop()`` and then runs the loop, which amounts to
    one iteration: the selector is polled once and every callback
    scheduled in response to I/O events gets executed.
    """
    loop.call_soon(loop.stop)
    loop.run_forever()
class SilentWSGIRequestHandler(WSGIRequestHandler):
    """WSGI request handler that swallows all of wsgiref's logging."""

    def get_stderr(self):
        # Hand wsgiref a throwaway buffer instead of real stderr.
        return io.StringIO()

    def log_message(self, format, *args):
        # Silence per-request access logging.
        pass
class SilentWSGIServer(WSGIServer):
    """WSGI server with a short per-request timeout and silent error handling."""

    # Seconds; keeps a hung test client from stalling the suite.
    request_timeout = 2

    def get_request(self):
        sock, addr = super().get_request()
        sock.settimeout(self.request_timeout)
        return sock, addr

    def handle_error(self, request, client_address):
        # Ignore request-level errors; tests deliberately exercise failures.
        pass
class SSLWSGIServerMixin:
    """Mixin that wraps each accepted request socket in server-side TLS."""

    def finish_request(self, request, client_address):
        # The relative location of our test directory (which
        # contains the ssl key and certificate files) differs
        # between the stdlib and stand-alone asyncio.
        # Prefer our own if we can find it.
        here = os.path.join(os.path.dirname(__file__), '..', 'tests')
        if not os.path.isdir(here):
            here = os.path.join(os.path.dirname(os.__file__),
                                'test', 'test_asyncio')
        keyfile = os.path.join(here, 'ssl_key.pem')
        certfile = os.path.join(here, 'ssl_cert.pem')
        context = ssl.SSLContext()
        context.load_cert_chain(certfile, keyfile)
        ssock = context.wrap_socket(request, server_side=True)
        try:
            # Serve the request over the TLS-wrapped socket.
            self.RequestHandlerClass(ssock, client_address, self)
            ssock.close()
        except OSError:
            # maybe socket has been closed by peer
            pass
class SSLWSGIServer(SSLWSGIServerMixin, SilentWSGIServer):
    """TCP WSGI test server whose request sockets are wrapped in TLS."""
    pass
def _run_test_server(*, address, use_ssl=False, server_cls, server_ssl_cls):
    # Generator helper driven with ``yield from`` inside the
    # @contextlib.contextmanager wrappers below; its single ``yield httpd``
    # becomes the wrapper's context-manager yield.
    def app(environ, start_response):
        # Trivial WSGI app shared by all the HTTP test servers.
        status = '200 OK'
        headers = [('Content-type', 'text/plain')]
        start_response(status, headers)
        return [b'Test message']

    # Run the test WSGI server in a separate thread in order not to
    # interfere with event handling in the main thread
    server_class = server_ssl_cls if use_ssl else server_cls
    httpd = server_class(address, SilentWSGIRequestHandler)
    httpd.set_app(app)
    httpd.address = httpd.server_address
    server_thread = threading.Thread(
        target=lambda: httpd.serve_forever(poll_interval=0.05))
    server_thread.start()
    try:
        yield httpd
    finally:
        # Stop serving, release the listening socket, and reap the thread.
        httpd.shutdown()
        httpd.server_close()
        server_thread.join()
if hasattr(socket, 'AF_UNIX'):
    # UNIX-domain-socket flavours of the test servers (POSIX only).

    class UnixHTTPServer(socketserver.UnixStreamServer, HTTPServer):

        def server_bind(self):
            socketserver.UnixStreamServer.server_bind(self)
            # HTTPServer expects a host/port pair, but a UNIX socket is
            # bound to a filesystem path — fake the values it wants.
            self.server_name = '127.0.0.1'
            self.server_port = 80

    class UnixWSGIServer(UnixHTTPServer, WSGIServer):
        """WSGI server listening on a UNIX-domain socket."""

        request_timeout = 2

        def server_bind(self):
            UnixHTTPServer.server_bind(self)
            self.setup_environ()

        def get_request(self):
            request, client_addr = super().get_request()
            request.settimeout(self.request_timeout)
            # Code in the stdlib expects that get_request
            # will return a socket and a tuple (host, port).
            # However, this isn't true for UNIX sockets,
            # as the second return value will be a path;
            # hence we return some fake data sufficient
            # to get the tests going
            return request, ('127.0.0.1', '')

    class SilentUnixWSGIServer(UnixWSGIServer):

        def handle_error(self, request, client_address):
            # Swallow per-request errors, mirroring SilentWSGIServer.
            pass

    class UnixSSLWSGIServer(SSLWSGIServerMixin, SilentUnixWSGIServer):
        """UNIX-socket WSGI server whose request sockets are TLS-wrapped."""
        pass
def gen_unix_socket_path():
    """Return a fresh filesystem path usable for a UNIX socket.

    The temporary file is deleted on return, so only its (now unused)
    name is handed back to the caller.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        return tmp.name
@contextlib.contextmanager
def unix_socket_path():
    """Yield a fresh UNIX-socket path; best-effort unlink on exit."""
    path = gen_unix_socket_path()
    try:
        yield path
    finally:
        # The path may never have been created, or was already removed.
        with contextlib.suppress(OSError):
            os.unlink(path)
@contextlib.contextmanager
def run_test_unix_server(*, use_ssl=False):
    """Context manager: test WSGI server listening on a throwaway UNIX socket."""
    with unix_socket_path() as path:
        yield from _run_test_server(address=path, use_ssl=use_ssl,
                                    server_cls=SilentUnixWSGIServer,
                                    server_ssl_cls=UnixSSLWSGIServer)
@contextlib.contextmanager
def run_test_server(*, host='127.0.0.1', port=0, use_ssl=False):
    """Context manager: test WSGI server over TCP (port 0 = ephemeral)."""
    yield from _run_test_server(address=(host, port), use_ssl=use_ssl,
                                server_cls=SilentWSGIServer,
                                server_ssl_cls=SSLWSGIServer)
def make_test_protocol(base):
    """Instantiate a subclass of *base* whose non-dunder attributes are
    all replaced by mock callbacks returning None."""
    namespace = {
        name: MockCallback(return_value=None)
        for name in dir(base)
        # skip magic names
        if not (name.startswith('__') and name.endswith('__'))
    }
    cls = type('TestProtocol', (base,) + base.__bases__, namespace)
    return cls()
class TestSelector(selectors.BaseSelector):
    """Minimal selector: records registrations but never reports events."""

    def __init__(self):
        self.keys = {}

    def register(self, fileobj, events, data=None):
        # fd is hard-wired to 0; tests only care about the mapping.
        entry = selectors.SelectorKey(fileobj, 0, events, data)
        self.keys[fileobj] = entry
        return entry

    def unregister(self, fileobj):
        return self.keys.pop(fileobj)

    def select(self, timeout):
        # Pretend no file object is ever ready.
        return []

    def get_map(self):
        return self.keys
class TestLoop(base_events.BaseEventLoop):
    """Loop for unittests.

    It manages its own notion of time directly: instead of waiting on the
    wall clock, after each loop iteration (once all ready handlers are
    done) the generator passed to __init__ is advanced.

    The generator should look like this:

        def gen():
            ...
            when = yield ...
            ... = yield time_advance

    The value produced by ``yield`` is the absolute time of the next
    scheduled handler; the value sent back into ``yield`` is the time
    advance used to move the loop's clock forward.
    """

    def __init__(self, gen=None):
        super().__init__()

        if gen is None:
            # No generator supplied: use a trivial one and skip the
            # "fully consumed" check in close().
            def gen():
                yield
            self._check_on_close = False
        else:
            self._check_on_close = True

        self._gen = gen()
        next(self._gen)
        self._time = 0
        self._clock_resolution = 1e-9
        self._timers = []
        self._selector = TestSelector()

        self.readers = {}
        self.writers = {}
        self.reset_counters()

        self._transports = weakref.WeakValueDictionary()

    def time(self):
        # Report the simulated clock, not the wall clock.
        return self._time

    def advance_time(self, advance):
        """Move test time forward."""
        if advance:
            self._time += advance

    def close(self):
        super().close()
        if self._check_on_close:
            try:
                self._gen.send(0)
            except StopIteration:
                pass
            else:  # pragma: no cover
                raise AssertionError("Time generator is not finished")

    def _add_reader(self, fd, callback, *args):
        self.readers[fd] = events.Handle(callback, args, self, None)

    def _remove_reader(self, fd):
        self.remove_reader_count[fd] += 1
        if fd in self.readers:
            del self.readers[fd]
            return True
        else:
            return False

    def assert_reader(self, fd, callback, *args):
        if fd not in self.readers:
            raise AssertionError(f'fd {fd} is not registered')
        handle = self.readers[fd]
        if handle._callback != callback:
            raise AssertionError(
                f'unexpected callback: {handle._callback} != {callback}')
        if handle._args != args:
            raise AssertionError(
                f'unexpected callback args: {handle._args} != {args}')

    def assert_no_reader(self, fd):
        if fd in self.readers:
            raise AssertionError(f'fd {fd} is registered')

    def _add_writer(self, fd, callback, *args):
        self.writers[fd] = events.Handle(callback, args, self, None)

    def _remove_writer(self, fd):
        self.remove_writer_count[fd] += 1
        if fd in self.writers:
            del self.writers[fd]
            return True
        else:
            return False

    def assert_writer(self, fd, callback, *args):
        assert fd in self.writers, 'fd {} is not registered'.format(fd)
        handle = self.writers[fd]
        assert handle._callback == callback, '{!r} != {!r}'.format(
            handle._callback, callback)
        assert handle._args == args, '{!r} != {!r}'.format(
            handle._args, args)

    def _ensure_fd_no_transport(self, fd):
        if not isinstance(fd, int):
            try:
                fd = int(fd.fileno())
            except (AttributeError, TypeError, ValueError):
                # This code matches selectors._fileobj_to_fd function.
                raise ValueError("Invalid file object: "
                                 "{!r}".format(fd)) from None
        try:
            transport = self._transports[fd]
        except KeyError:
            pass
        else:
            raise RuntimeError(
                'File descriptor {!r} is used by transport {!r}'.format(
                    fd, transport))

    def add_reader(self, fd, callback, *args):
        """Add a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._add_reader(fd, callback, *args)

    def remove_reader(self, fd):
        """Remove a reader callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_reader(fd)

    def add_writer(self, fd, callback, *args):
        """Add a writer callback.."""
        self._ensure_fd_no_transport(fd)
        return self._add_writer(fd, callback, *args)

    def remove_writer(self, fd):
        """Remove a writer callback."""
        self._ensure_fd_no_transport(fd)
        return self._remove_writer(fd)

    def reset_counters(self):
        # Counters used by tests to assert how often removal was attempted.
        self.remove_reader_count = collections.defaultdict(int)
        self.remove_writer_count = collections.defaultdict(int)

    def _run_once(self):
        super()._run_once()
        # Feed each scheduled deadline to the time generator and advance
        # the simulated clock by whatever it sends back.
        for when in self._timers:
            advance = self._gen.send(when)
            self.advance_time(advance)
        self._timers = []

    def call_at(self, when, callback, *args, context=None):
        # Record the deadline so _run_once() can drive the time generator.
        self._timers.append(when)
        return super().call_at(when, callback, *args, context=context)

    def _process_events(self, event_list):
        # TestSelector never reports events; nothing to process.
        return

    def _write_to_self(self):
        pass
def MockCallback(**kwargs):
    """Return a callable mock whose spec is limited to __call__."""
    return mock.Mock(spec=['__call__'], **kwargs)
class MockPattern(str):
    """A regex based str with a fuzzy __eq__.

    Use this helper with 'mock.assert_called_with', or anywhere
    where a regex comparison between strings is needed.

    For instance:
       mock_call.assert_called_with(MockPattern('spam.*ham'))
    """

    def __eq__(self, other):
        # DOTALL search: equal iff the pattern occurs anywhere in `other`.
        return re.search(str(self), other, re.S) is not None
class MockInstanceOf:
    """Equality helper: compares equal to any instance of the given type."""

    def __init__(self, type):
        self._type = type

    def __eq__(self, other):
        return isinstance(other, self._type)
def get_function_source(func):
    """Return asyncio's (filename, lineno) description of *func*.

    Raises ValueError when the source location cannot be determined.
    """
    location = format_helpers._get_function_source(func)
    if location is None:
        raise ValueError("unable to get the source of %r" % (func,))
    return location
class TestCase(unittest.TestCase):
    """Base class for asyncio tests: loop bookkeeping + thread cleanup."""

    @staticmethod
    def close_loop(loop):
        # Shut the default executor down first so its worker threads do
        # not outlive the loop.
        executor = loop._default_executor
        if executor is not None:
            executor.shutdown(wait=True)
        loop.close()

    def set_event_loop(self, loop, *, cleanup=True):
        assert loop is not None
        # ensure that the event loop is passed explicitly in asyncio
        events.set_event_loop(None)
        if cleanup:
            self.addCleanup(self.close_loop, loop)

    def new_test_loop(self, gen=None):
        loop = TestLoop(gen)
        self.set_event_loop(loop)
        return loop

    def unpatch_get_running_loop(self):
        # Undo the setUp() patch below.
        events._get_running_loop = self._get_running_loop

    def setUp(self):
        # Patch out the running-loop lookup so tests must pass loops
        # explicitly; keep the original for tearDown.
        self._get_running_loop = events._get_running_loop
        events._get_running_loop = lambda: None
        self._thread_cleanup = support.threading_setup()

    def tearDown(self):
        self.unpatch_get_running_loop()

        events.set_event_loop(None)

        # Detect CPython bug #23353: ensure that yield/yield-from is not used
        # in an except block of a generator
        self.assertEqual(sys.exc_info(), (None, None, None))

        self.doCleanups()
        support.threading_cleanup(*self._thread_cleanup)
        support.reap_children()
@contextlib.contextmanager
def disable_logger():
    """Context manager to disable asyncio logger.

    For example, it can be used to ignore warnings in debug mode.
    """
    saved_level = logger.level
    try:
        # CRITICAL+1 is above every standard level, so nothing is emitted.
        logger.setLevel(logging.CRITICAL + 1)
        yield
    finally:
        logger.setLevel(saved_level)
def mock_nonblocking_socket(proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM,
                            family=socket.AF_INET):
    """Create a mock of a non-blocking socket."""
    sock = mock.MagicMock(socket.socket)
    sock.family = family
    sock.type = type
    sock.proto = proto
    # A timeout of 0.0 is how a non-blocking socket reports itself.
    sock.gettimeout.return_value = 0.0
    return sock
|
ChickenVision.py | #!/usr/bin/env python3
#----------------------------------------------------------------------------
# Copyright (c) 2018 FIRST. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
# My 2019 license: use it as much as you want. Crediting is recommended because it lets me know that I am being useful.
# Credit to Screaming Chickens 3997
# This is meant to be used in conjuction with WPILib Raspberry Pi image: https://github.com/wpilibsuite/FRCVision-pi-gen
#----------------------------------------------------------------------------
import json
import time
import sys
from threading import Thread
from cscore import CameraServer, VideoSource
from networktables import NetworkTablesInstance
import cv2
import numpy as np
from networktables import NetworkTables
import math
########### SET RESOLUTION TO 256x144 !!!! ############
# import the necessary packages
import datetime
#Class to examine Frames per second of camera stream. Currently not used.
class FPS:
    """Measures frames-per-second of a camera stream.  Currently unused."""

    def __init__(self):
        # Interval endpoints and the number of frames seen between them.
        self._start = None
        self._end = None
        self._numFrames = 0

    def start(self):
        """Begin timing; returns self for chaining."""
        self._start = datetime.datetime.now()
        return self

    def stop(self):
        """End timing."""
        self._end = datetime.datetime.now()

    def update(self):
        """Record that one more frame was examined."""
        self._numFrames += 1

    def elapsed(self):
        """Seconds between start() and stop()."""
        return (self._end - self._start).total_seconds()

    def fps(self):
        """Approximate frames per second over the measured interval."""
        return self._numFrames / self.elapsed()
# class that runs separate thread for showing video,
class VideoShow:
    """Continuously pushes frames to a CameraServer output stream from a
    dedicated thread."""

    def __init__(self, imgWidth, imgHeight, cameraServer, frame=None, name='stream'):
        self.outputStream = cameraServer.putVideo(name, imgWidth, imgHeight)
        self.frame = frame
        self.stopped = False

    def start(self):
        """Spawn the display thread; returns self for chaining."""
        worker = Thread(target=self.show, args=())
        worker.start()
        return self

    def show(self):
        # Keep pushing the most recent frame until stop() is called.
        while not self.stopped:
            self.outputStream.putFrame(self.frame)

    def stop(self):
        self.stopped = True

    def notifyError(self, error):
        self.outputStream.notifyError(error)
class WebcamVideoStream:
    """Grabs frames from a CameraServer video stream on a daemon thread.

    Exposure is forced to manual 0 (for retroreflective-tape tracking);
    set ``autoExpose`` to True from outside to switch the camera back to
    automatic exposure.
    """

    def __init__(self, camera, cameraServer, frameWidth, frameHeight, name="WebcamVideoStream"):
        # initialize the video camera stream and read the first frame
        # from the stream
        # Automatically sets exposure to 0 to track tape
        self.webcam = camera
        self.webcam.setExposureManual(0)
        # Some booleans so that we don't keep setting exposure over and over to the same value
        self.autoExpose = False
        # NOTE(review): prevValue is written here but never read anywhere in
        # this class — confirm whether external code depends on it.
        self.prevValue = self.autoExpose
        # Make a blank image to write on
        # NOTE(review): np.zeros(shape=(frameWidth, frameHeight, 3)) puts
        # width first, while OpenCV images are (rows=height, cols=width, 3)
        # — confirm intended; grabFrame() may replace the buffer anyway.
        self.img = np.zeros(shape=(frameWidth, frameHeight, 3), dtype=np.uint8)
        # Gets the video
        self.stream = cameraServer.getVideo(camera = camera)
        (self.timestamp, self.img) = self.stream.grabFrame(self.img)
        # initialize the thread name
        self.name = name
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            if self.stopped:
                return
            # Boolean logic we don't keep setting exposure over and over to the same value
            if self.autoExpose:
                self.webcam.setExposureAuto()
            else:
                self.webcam.setExposureManual(0)
            # gets the image and timestamp from cameraserver
            (self.timestamp, self.img) = self.stream.grabFrame(self.img)

    def read(self):
        # return the frame most recently read
        return self.timestamp, self.img

    def stop(self):
        # indicate that the thread should be stopped
        self.stopped = True

    def getError(self):
        # Surface the underlying CameraServer stream error, if any.
        return self.stream.getError()
###################### PROCESSING OPENCV ################################

# Angles in radians

# image size ratioed to 16:9
image_width = 256
image_height = 144

# Lifecam 3000 from datasheet
# Datasheet: https://dl2jx7zfbtwvr.cloudfront.net/specsheets/WEBC1010.pdf
diagonalView = math.radians(68.5)

# 16:9 aspect ratio
horizontalAspect = 16
verticalAspect = 9

# Reasons for using diagonal aspect is to calculate horizontal field of view.
diagonalAspect = math.hypot(horizontalAspect, verticalAspect)
# Calculations: http://vrguy.blogspot.com/2013/04/converting-diagonal-field-of-view-and.html
horizontalView = math.atan(math.tan(diagonalView/2) * (horizontalAspect / diagonalAspect)) * 2
verticalView = math.atan(math.tan(diagonalView/2) * (verticalAspect / diagonalAspect)) * 2

# Focal Length calculations: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_165
# Focal lengths in pixels, used to convert pixel offsets into yaw/pitch angles.
H_FOCAL_LENGTH = image_width / (2*math.tan((horizontalView/2)))
V_FOCAL_LENGTH = image_height / (2*math.tan((verticalView/2)))

# blurs have to be odd
green_blur = 1
orange_blur = 27

# define range of green of retroreflective tape in HSV
lower_green = np.array([67.62589835434508,83.47122433374253, 68.70503793946273])
upper_green = np.array([93.99317406143345, 255.0, 255.0])

# define range of orange from cargo ball in HSV
lower_orange = np.array([0,193,92])
upper_orange = np.array([23, 255, 255])
#Flip image if camera mounted upside down
def flipImage(frame):
return cv2.flip( frame, -1 )
#Blurs frame
def blurImg(frame, blur_radius):
img = frame.copy()
blur = cv2.blur(img,(blur_radius,blur_radius))
return blur
# Masks the video based on a range of hsv colors
# Takes in a frame, range of color, and a blurred frame, returns a masked frame
def threshold_video(lower_color, upper_color, blur):
    """Mask a blurred BGR frame to the pixels inside an HSV color range.

    Returns a binary mask: 255 where the pixel falls within
    [lower_color, upper_color] after HSV conversion, 0 elsewhere.
    """
    hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
    return cv2.inRange(hsv, lower_color, upper_color)
# Finds the tape targets from the masked image and displays them on original stream + network tables
def findTargets(frame, mask):
    """Locate retroreflective-tape contours in ``mask`` and annotate ``frame``.

    Returns a copy of ``frame`` with detections drawn; publishes
    "tapeDetected" = False to the network table when no contours exist.
    """
    # Finds contours
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4.x
    # returns (contours, hierarchy) — confirm the pinned cv2 version.
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    # Take each frame
    # Gets the shape of video
    screenHeight, screenWidth, _ = frame.shape
    # Gets center of height and width
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Copies frame and stores it in image
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, (centerOfImage)
    if len(contours) != 0:
        image = findTape(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see vision target to network tables
        networkTable.putBoolean("tapeDetected", False)

    # Shows the contours overlayed on the original video
    return image
# Finds the balls from the masked image and displays them on original stream + network tables
def findCargo(frame, mask):
    """Locate cargo-ball contours in ``mask`` and annotate ``frame``.

    Returns a copy of ``frame`` with detections drawn; publishes
    "cargoDetected" = False to the network table when no contours exist.
    """
    # Finds contours
    # NOTE(review): 3-value return is the OpenCV 3.x findContours API —
    # confirm the pinned cv2 version.
    _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_TC89_KCOS)
    # Take each frame
    # Gets the shape of video
    screenHeight, screenWidth, _ = frame.shape
    # Gets center of height and width
    centerX = (screenWidth / 2) - .5
    centerY = (screenHeight / 2) - .5
    # Copies frame and stores it in image
    image = frame.copy()
    # Processes the contours, takes in (contours, output_image, (centerOfImage)
    if len(contours) != 0:
        image = findBall(contours, image, centerX, centerY)
    else:
        # pushes that it doesn't see cargo to network tables
        networkTable.putBoolean("cargoDetected", False)

    # Shows the contours overlayed on the original video
    return image
# Draws Contours and finds center and yaw of orange ball
# centerX is center x coordinate of image
# centerY is center y coordinate of image
def findBall(contours, image, centerX, centerY):
    """Filter `contours` down to plausible cargo balls, draw up to three of
    them on `image`, and publish the yaw of the one closest to the image
    center to NetworkTables.  Returns the annotated image."""
    screenHeight, screenWidth, channels = image.shape
    # Candidate balls as [cx, cy] centroids (initialized up front so the
    # detection report below is safe even with no contours).
    biggestCargo = []
    if len(contours) > 0:
        # Sort contours by area size (biggest to smallest)
        cntsSorted = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
        for cnt in cntsSorted:
            x, y, w, h = cv2.boundingRect(cnt)
            # Balls should be roughly square in their bounding box.
            aspect_ratio = float(w) / h
            # Moments give the centroid below.
            M = cv2.moments(cnt)
            # Calculate Contour area
            cntArea = cv2.contourArea(cnt)
            # Filters contours based off of size and squareness.
            if checkBall(cntArea, aspect_ratio):
                ### MOSTLY DRAWING CODE, BUT CALCULATES IMPORTANT INFO ###
                # Centroid of contour; guard against zero-area division.
                if M["m00"] != 0:
                    cx = int(M["m10"] / M["m00"])
                    cy = int(M["m01"] / M["m00"])
                else:
                    cx, cy = 0, 0
                if len(biggestCargo) < 3:
                    ##### DRAWS CONTOUR######
                    # Rotated bounding rectangle of the contour.
                    rect = cv2.minAreaRect(cnt)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    cv2.drawContours(image, [box], 0, (23, 184, 80), 3)
                    # Vertical white line through the centroid + marker dot.
                    cv2.line(image, (cx, screenHeight), (cx, 0), (255, 255, 255))
                    cv2.circle(image, (cx, cy), 6, (255, 255, 255))
                    # Contour outline.
                    cv2.drawContours(image, [cnt], 0, (23, 184, 80), 1)
                    # Enclosing circle and upright bounding box, in green.
                    (x, y), radius = cv2.minEnclosingCircle(cnt)
                    center = (int(x), int(y))
                    radius = int(radius)
                    rx, ry, rw, rh = cv2.boundingRect(cnt)
                    cv2.rectangle(image, (rx, ry), (rx + rw, ry + rh), (23, 184, 80), 1)
                    cv2.circle(image, center, radius, (23, 184, 80), 1)
                    # BUG FIX: the duplicate check used to test
                    # [cx, cy, cnt] against entries shaped [cx, cy], so it
                    # never matched and duplicates were always appended.
                    if [cx, cy] not in biggestCargo:
                        biggestCargo.append([cx, cy])
    # Check if there are cargo seen
    if len(biggestCargo) > 0:
        # Pushes that it sees cargo to network tables.
        networkTable.putBoolean("cargoDetected", True)
        # Sorts targets based on x coords to break any angle tie.
        biggestCargo.sort(key=lambda x: math.fabs(x[0]))
        closestCargo = min(biggestCargo, key=lambda x: (math.fabs(x[0] - centerX)))
        xCoord = closestCargo[0]
        finalTarget = calculateYaw(xCoord, centerX, H_FOCAL_LENGTH)
        print("Yaw: " + str(finalTarget))
        # Draws yaw of target + line where center of target is.
        cv2.putText(image, "Yaw: " + str(finalTarget), (40, 40), cv2.FONT_HERSHEY_COMPLEX, .6,
                    (255, 255, 255))
        cv2.line(image, (int(xCoord), screenHeight), (int(xCoord), 0), (255, 0, 0), 2)
        currentAngleError = finalTarget
        # Pushes cargo angle to network tables.
        networkTable.putNumber("cargoYaw", currentAngleError)
    else:
        # Pushes that it doesn't see cargo to network tables.
        networkTable.putBoolean("cargoDetected", False)
    # Reference line at the image center.
    cv2.line(image, (int(centerX), screenHeight), (int(centerX), 0), (255, 255, 255), 2)
    return image
# Draws Contours and finds center and yaw of vision targets
# centerX is center x coordinate of image
# centerY is center y coordinate of image
def findTape(contours, image, centerX, centerY):
    """Pair slanted tape contours into vision targets, draw them on
    `image`, and publish the yaw of the most centered target to
    NetworkTables.  Returns the annotated image."""
    screenHeight, screenWidth, channels = image.shape
    # Candidate targets as [pairCenterX, yawToPair].
    targets = []
    # A target is a *pair* of tape strips, so fewer than two contours can
    # never form one.
    if len(contours) >= 2:
        # Largest contours first so the 13-candidate cap keeps the most
        # promising ones.
        cntsSorted = sorted(contours, key=lambda x: cv2.contourArea(x), reverse=True)
        biggestCnts = []
        for cnt in cntsSorted:
            # Moments give the centroid below.
            M = cv2.moments(cnt)
            # Convex hull area is handed to the size filter (currently
            # unused there, kept for interface stability).
            hull = cv2.convexHull(cnt)
            cntArea = cv2.contourArea(cnt)
            hullArea = cv2.contourArea(hull)
            # Filters contours based off of size.
            if checkContours(cntArea, hullArea):
                ### MOSTLY DRAWING CODE, BUT CALCULATES IMPORTANT INFO ###
                # Centroid of contour; guard against zero-area division.
                if M["m00"] != 0:
                    cx = int(M["m10"] / M["m00"])
                    cy = int(M["m01"] / M["m00"])
                else:
                    cx, cy = 0, 0
                if len(biggestCnts) < 13:
                    # Slant of the tape from a fitted ellipse (also drawn).
                    # NOTE: the original also computed yaw/pitch for each
                    # contour here (twice) and an extra boundingRect; all
                    # were dead code and have been removed.
                    rotation = getEllipseRotation(image, cnt)
                    # Rotated bounding rectangle of the contour.
                    rect = cv2.minAreaRect(cnt)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    cv2.drawContours(image, [box], 0, (23, 184, 80), 3)
                    # Vertical white line through the centroid + marker dot.
                    cv2.line(image, (cx, screenHeight), (cx, 0), (255, 255, 255))
                    cv2.circle(image, (cx, cy), 6, (255, 255, 255))
                    # Contour outline.
                    cv2.drawContours(image, [cnt], 0, (23, 184, 80), 1)
                    # Enclosing circle and upright bounding box, in green.
                    (x, y), radius = cv2.minEnclosingCircle(cnt)
                    center = (int(x), int(y))
                    radius = int(radius)
                    rx, ry, rw, rh = cv2.boundingRect(cnt)
                    cv2.rectangle(image, (rx, ry), (rx + rw, ry + rh), (23, 184, 80), 1)
                    cv2.circle(image, center, radius, (23, 184, 80), 1)
                    # Record candidate (skip exact duplicates).
                    if [cx, cy, rotation] not in biggestCnts:
                        biggestCnts.append([cx, cy, rotation])
        # Sort left-to-right so neighbours in the list are adjacent on screen.
        biggestCnts = sorted(biggestCnts, key=lambda x: x[0])
        # Pair adjacent contours whose slants oppose each other.
        for i in range(len(biggestCnts) - 1):
            # Rotation of two adjacent contours.
            tilt1 = biggestCnts[i][2]
            tilt2 = biggestCnts[i + 1][2]
            # x/y coords of the two contours.
            cx1 = biggestCnts[i][0]
            cx2 = biggestCnts[i + 1][0]
            cy1 = biggestCnts[i][1]
            cy2 = biggestCnts[i + 1][1]
            # If contour angles are opposite, the strips lean toward each other.
            if np.sign(tilt1) != np.sign(tilt2):
                centerOfTarget = math.floor((cx1 + cx2) / 2)
                # Ellipse convention: negative tilt means rotated to the
                # right (min-area rect would be the opposite).
                # If left contour rotation is tilted away, skip the pair.
                if tilt1 > 0:
                    if cx1 < cx2:
                        continue
                if tilt2 > 0:
                    if cx2 < cx1:
                        continue
                # Angle from camera center to target (feed this to the gyro).
                yawToTarget = calculateYaw(centerOfTarget, centerX, H_FOCAL_LENGTH)
                # Make sure no duplicates, then append.
                if [centerOfTarget, yawToTarget] not in targets:
                    targets.append([centerOfTarget, yawToTarget])
    # Check if there are targets seen
    if len(targets) > 0:
        # Pushes that it sees vision target to network tables.
        networkTable.putBoolean("tapeDetected", True)
        # Sorts targets based on x coords to break any angle tie.
        targets.sort(key=lambda x: math.fabs(x[0]))
        finalTarget = min(targets, key=lambda x: math.fabs(x[1]))
        # Draws yaw of target + line where center of target is.
        cv2.putText(image, "Yaw: " + str(finalTarget[1]), (40, 40), cv2.FONT_HERSHEY_COMPLEX, .6,
                    (255, 255, 255))
        cv2.line(image, (finalTarget[0], screenHeight), (finalTarget[0], 0), (255, 0, 0), 2)
        currentAngleError = finalTarget[1]
        # Pushes vision target angle to network tables.
        networkTable.putNumber("tapeYaw", currentAngleError)
    else:
        # Pushes that it doesn't see a vision target to network tables.
        networkTable.putBoolean("tapeDetected", False)
    # Reference line at the image center.
    cv2.line(image, (round(centerX), screenHeight), (round(centerX), 0), (255, 255, 255), 2)
    return image
# Checks if tape contours are worthy based off of contour area and (not currently) hull area
def checkContours(cntSize, hullSize):
    """Return True when a tape contour is large enough to be a real target.
    `hullSize` is currently unused but kept for interface stability."""
    minimum_area = image_width / 6
    return cntSize > minimum_area
# Checks if ball contours are worthy based off of contour area and aspect ratio
def checkBall(cntSize, cntAspectRatio):
    """Return True when a contour is large enough and roughly square
    (aspect ratio rounding to 1), i.e. plausibly a cargo ball."""
    big_enough = cntSize > (image_width / 2)
    roughly_round = round(cntAspectRatio) == 1
    return big_enough and roughly_round
# Maps an OpenCV rotation angle into (-90, 90] so the sign tells which
# way the shape slants.
def translateRotation(rotation, width, height):
    """Normalize `rotation` (degrees) using the shape's aspect so the
    returned, rounded angle lies in (-90, 90]."""
    if width < height:
        rotation = 90 - rotation
    if rotation > 90:
        rotation = 180 - rotation
    return round(-rotation)
def calculateDistance(heightOfCamera, heightOfTarget, pitch):
    """Estimate the horizontal distance to the target.

    Uses the vertical offset between camera and target plus the camera's
    pitch angle (degrees):

        tan(pitch) = height_difference / distance
        distance   = height_difference / tan(pitch)
    """
    height_difference = heightOfTarget - heightOfCamera
    # fabs: distance is a magnitude regardless of which is mounted higher.
    return math.fabs(height_difference / math.tan(math.radians(pitch)))
# Uses trig and focal length of camera to find yaw.
# Link to further explanation: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_298
def calculateYaw(pixelX, centerX, hFocalLength):
    """Horizontal angle (degrees, rounded) from the image center to pixelX."""
    horizontal_offset = pixelX - centerX
    return round(math.degrees(math.atan(horizontal_offset / hFocalLength)))
# Link to further explanation: https://docs.google.com/presentation/d/1ediRsI-oR3-kwawFJZ34_ZTlQS2SDBLjZasjzZ-eXbQ/pub?start=false&loop=false&slide=id.g12c083cffa_0_298
def calculatePitch(pixelY, centerY, vFocalLength):
    """Vertical angle (degrees, rounded) from the image center to pixelY.
    Negated because image y grows downward while pitch grows upward."""
    angle = math.degrees(math.atan((pixelY - centerY) / vFocalLength))
    return round(-angle)
def getEllipseRotation(image, cnt):
    """Return the slant of contour `cnt`, mapped into (-90, 90].

    The preferred path fits an ellipse (cv2.fitEllipse needs >= 5 contour
    points) and draws it on `image`; if fitting fails we fall back to the
    rotation of the min-area bounding rectangle (nothing drawn then).
    """
    try:
        # Gets rotated bounding ellipse of contour.
        ellipse = cv2.fitEllipse(cnt)
        # Ellipse rotation equals the contour's rotation.
        rotation = ellipse[2]
        # Width and height of the fitted ellipse decide the slant direction.
        widthE = ellipse[1][0]
        heightE = ellipse[1][1]
        # Maps rotation to (-90, 90] so the sign encodes the slant.
        rotation = translateRotation(rotation, widthE, heightE)
        cv2.ellipse(image, ellipse, (23, 184, 80), 3)
        return rotation
    except cv2.error:
        # BUG FIX: was a bare `except:` that also swallowed
        # KeyboardInterrupt and genuine programming errors; only the
        # OpenCV fit failure (e.g. tiny contour) should trigger the
        # fallback.  Dead locals (box, center) were removed.
        rect = cv2.minAreaRect(cnt)
        # Rotation of the rectangle; same as rotation of the contour.
        rotation = rect[2]
        width = rect[1][0]
        height = rect[1][1]
        return translateRotation(rotation, width, height)
#################### FRC VISION PI Image Specific #############
# Path to the FRC vision config file shipped with the Pi image; may be
# overridden by the first command-line argument in __main__.
configFile = "/boot/frc.json"
# Simple namespace holding one camera's name, device path and raw config.
class CameraConfig: pass
# FRC team number read from the config file (set by readConfig).
team = None
# True -> this Pi runs the NetworkTables server; False -> robot client.
server = False
# All successfully parsed CameraConfig objects (filled by readCameraConfig).
cameraConfigs = []
"""Report parse error."""
def parseError(str):
print("config error in '" + configFile + "': " + str, file=sys.stderr)
"""Read single camera configuration."""
def readCameraConfig(config):
cam = CameraConfig()
# name
try:
cam.name = config["name"]
except KeyError:
parseError("could not read camera name")
return False
# path
try:
cam.path = config["path"]
except KeyError:
parseError("camera '{}': could not read path".format(cam.name))
return False
cam.config = config
cameraConfigs.append(cam)
return True
"""Read configuration file."""
def readConfig():
global team
global server
# parse file
try:
with open(configFile, "rt") as f:
j = json.load(f)
except OSError as err:
print("could not open '{}': {}".format(configFile, err), file=sys.stderr)
return False
# top level must be an object
if not isinstance(j, dict):
parseError("must be JSON object")
return False
# team number
try:
team = j["team"]
except KeyError:
parseError("could not read team number")
return False
# ntmode (optional)
if "ntmode" in j:
str = j["ntmode"]
if str.lower() == "client":
server = False
elif str.lower() == "server":
server = True
else:
parseError("could not understand ntmode value '{}'".format(str))
# cameras
try:
cameras = j["cameras"]
except KeyError:
parseError("could not read cameras")
return False
for camera in cameras:
if not readCameraConfig(camera):
return False
return True
"""Start running the camera."""
def startCamera(config):
print("Starting camera '{}' on {}".format(config.name, config.path))
cs = CameraServer.getInstance()
camera = cs.startAutomaticCapture(name=config.name, path=config.path)
camera.setConfigJson(json.dumps(config.config))
return cs, camera
if __name__ == "__main__":
if len(sys.argv) >= 2:
configFile = sys.argv[1]
# read configuration
if not readConfig():
sys.exit(1)
# start NetworkTables
ntinst = NetworkTablesInstance.getDefault()
#Name of network table - this is how it communicates with robot. IMPORTANT
networkTable = NetworkTables.getTable('ChickenVision')
if server:
print("Setting up NetworkTables server")
ntinst.startServer()
else:
print("Setting up NetworkTables client for team {}".format(team))
ntinst.startClientTeam(team)
# start cameras
cameras = []
streams = []
for cameraConfig in cameraConfigs:
cs, cameraCapture = startCamera(cameraConfig)
streams.append(cs)
cameras.append(cameraCapture)
#Get the first camera
webcam = cameras[0]
cameraServer = streams[0]
#Start thread reading camera
cap = WebcamVideoStream(webcam, cameraServer, image_width, image_height).start()
# (optional) Setup a CvSource. This will send images back to the Dashboard
# Allocating new images is very expensive, always try to preallocate
img = np.zeros(shape=(image_height, image_width, 3), dtype=np.uint8)
#Start thread outputing stream
streamViewer = VideoShow(image_width,image_height, cameraServer, frame=img, name="ChickenVision").start()
#cap.autoExpose=True;
tape = False
fps = FPS().start()
#TOTAL_FRAMES = 200;
# loop forever
while True:
# Tell the CvSink to grab a frame from the camera and put it
# in the source image. If there is an error notify the output.
timestamp, img = cap.read()
#Uncomment if camera is mounted upside down
frame = flipImage(img)
#Comment out if camera is mounted upside down
#frame = img
if timestamp == 0:
# Send the output the error.
streamViewer.notifyError(cap.getError());
# skip the rest of the current iteration
continue
#Checks if you just want camera for driver (No processing), False by default
if(networkTable.getBoolean("Driver", False)):
cap.autoExpose = True
processed = frame
else:
# Checks if you just want camera for Tape processing , False by default
if(networkTable.getBoolean("Tape", True)):
#Lowers exposure to 0
cap.autoExpose = False
boxBlur = blurImg(frame, green_blur)
threshold = threshold_video(lower_green, upper_green, boxBlur)
processed = findTargets(frame, threshold)
else:
# Checks if you just want camera for Cargo processing, by dent of everything else being false, true by default
cap.autoExpose = True
boxBlur = blurImg(frame, orange_blur)
threshold = threshold_video(lower_orange, upper_orange, boxBlur)
processed = findCargo(frame, threshold)
#Puts timestamp of camera on netowrk tables
networkTable.putNumber("VideoTimestamp", timestamp)
streamViewer.frame = processed
# update the FPS counter
fps.update()
#Flushes camera values to reduce latency
ntinst.flush()
#Doesn't do anything at the moment. You can easily get this working by indenting these three lines
# and setting while loop to: while fps._numFrames < TOTAL_FRAMES
fps.stop()
print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
|
replicator_manager.py | from multiprocessing import Process, Queue, Event, Manager
import time
import json
import random
import requests
from src.replicator import Replicator
class ReplicatorManager(Replicator):
    """Coordinator replicator: accepts client requests over HTTP,
    serializes them by timestamp, applies them locally, and fans each
    successful operation out to the backing replicas before answering.
    """
    def __init__(self, name, ip, port, log_server_path, log_server_lock, n_replicators=3):
        """Start `n_replicators` backing replicas (on ports port+1..port+n)
        and a background process that drains the request queue."""
        super().__init__(
            name=name,
            ip=ip,
            port=port,
            log_server_path=log_server_path,
            log_server_lock=log_server_lock,
            server=False
        )
        # Replicators: spawn the backing replicas on consecutive ports.
        self.replicators = []
        for i in range(1, n_replicators+1):
            r = Replicator(
                name=f'Replicator_{i}',
                ip=self.ip,
                port=self.port+i,
                log_server_path=self.log_server_path,
                log_server_lock=self.log_server_lock
            )
            self.replicators.append(r)
            r.start()
        # Shared state between HTTP handler processes and the solver:
        # a manager-backed pending list and an answer map keyed by
        # (client_name, timestamp).
        self.manager = Manager()
        self.request_queue = self.manager.list()
        self.request_answer = self.manager.dict()
        # URL
        self.add_endpoint(endpoint='/', handler=self.get_request, methods=['POST'])
        # Process to solve requests in the background.
        self.solver_process = Process(target=self.solve_request)
        self.solver_process.start()
        return
    def run(self):
        """Run the HTTP front-end; on shutdown, kill the solver and replicas."""
        super().run()
        # Kill child processes once the server loop returns.
        self.solver_process.terminate()
        for r in self.replicators:
            r.terminate()
        return
    def get_request(self):
        """HTTP handler: enqueue the request and block until the solver
        publishes an answer for it."""
        data, status = self.get_data(keys=['request', 'timestamp', 'send_id'])
        if status != 200:
            self.log(data)
            return data, status
        # Per-request event the solver sets once the answer is ready.
        event_wait = self.manager.Event()
        self.log(f"[RECIVE][REQUEST][{data['send_id']}][{data['request']['type'].upper()}]: {data['request']['data']}")
        self.request_queue.append(
            (
                data['timestamp'],
                data['send_id'],
                data['request'],
                event_wait
            )
        )
        request_answer_key = (data['send_id'], data['timestamp'])
        event_wait.wait()
        answer = self.request_answer[request_answer_key]
        return answer[0], answer[1]
    def make_request(self, replicator, client_name, data, rq_type, request_answer_key):
        """Forward one operation to a single replica; on failure, replace
        the stored answer so the client sees the replica's error."""
        data['send_id'] = self.name
        self.log(f'[SEND][REQUEST][{replicator.name}]: from {client_name} > {data}')
        answer = requests.post(
            f'http://{replicator.ip}:{replicator.port}/{rq_type}_file',
            json=data
        )
        self.log(f'[RECIVE][RESPONSE][{replicator.name}][{answer.status_code}]: {answer.text}')
        if answer.status_code != 200:
            self.request_answer[request_answer_key] = (answer.text, answer.status_code)
        return
    def solve_request(self):
        """Worker loop: apply queued requests in timestamp order, then
        replicate successful ones before waking the waiting handler."""
        while True:
            # NOTE(review): busy-waits when idle; a blocking queue would
            # avoid spinning.
            if self.request_queue:
                # Sort requests by timestamp so all replicas apply the
                # same total order.
                self.request_queue.sort()
                # Picking the oldest request.
                client_timestamp, client_name, client_request, event_wait = self.request_queue.pop(0)
                self.log(f'[EXECUTE][({client_timestamp},{client_name})]: {client_request}')
                client_request['data']['send_id'] = client_name
                # Dispatch on request type.  BUG FIX: an unrecognized type
                # previously left `answer` unbound and crashed the solver
                # process with a NameError; now it yields a 400 answer.
                handlers = {
                    'create': self.create_file,
                    'update': self.update_file,
                    'append': self.append_file,
                    'delete': self.delete_file,
                    'get': self.get_file,
                }
                handler = handlers.get(client_request['type'])
                if handler is not None:
                    answer = handler(data=client_request['data'])
                else:
                    answer = (f"unknown request type '{client_request['type']}'", 400)
                request_answer_key = (client_name, client_timestamp)
                self.request_answer[request_answer_key] = answer
                # On error answer the client immediately; nothing to replicate.
                if answer[1] != 200:
                    event_wait.set()
                    continue
                # Send the request to the replicas, one process per replica.
                replicators_request = []
                for replicator in self.replicators:
                    replicators_request.append(
                        Process(
                            target=self.make_request,
                            args=(
                                replicator,
                                client_name,
                                client_request['data'],
                                client_request['type'],
                                request_answer_key,
                            )
                        )
                    )
                for rq in replicators_request:
                    rq.start()
                # Wait for all replicas before acknowledging the client.
                for rq in replicators_request:
                    rq.join()
                event_wait.set()
                continue
|
__init__.py | '''
Python Multicopter class
Uses UDP sockets to communicate with MulticopterSim
Copyright(C) 2019 Simon D.Levy
MIT License
'''
from threading import Thread
import socket
import numpy as np
class Multicopter(object):
    '''
    Represents a Multicopter object communicating with MulticopterSim via UDP socket calls.
    '''
    def __init__(self, host='127.0.0.1', motorPort=5000, telemetryPort=5001, motorCount=4):
        '''
        Creates a Multicopter object.
        host - name of host running MulticopterSim
        motorPort - port over which this object will send motor commands to host
        telemeteryPort - port over which this object will receive telemetry from host
        motorCount - number of motors in vehicle running in simulator on host
        '''
        # Outgoing UDP socket for motor commands (connectionless sendto).
        self.motorSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.motorSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        # Incoming UDP socket, bound locally, for telemetry packets.
        self.telemSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.telemSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.telemSocket.bind((host, telemetryPort))
        self.host = host
        self.motorPort = motorPort
        self.motorCount = motorCount
        # Daemon thread so the receive loop dies with the main program.
        self.thread = Thread(target=self._run)
        self.thread.daemon = True
        # Latest motor commands, overwritten by setMotors().
        self.motorVals = np.zeros(motorCount)
        # Latest telemetry vector.  NOTE(review): allocated with 11 slots
        # while getState()'s docstring lists 10 fields and _run reads 80
        # bytes (10 float64s) -- confirm the intended state layout.
        self.state = np.zeros(11)
        # Becomes True once the first telemetry packet arrives.
        self.ready = False
    def start(self):
        '''
        Begins communication with simulator running on host.
        '''
        self.thread.start()
    def isReady(self):
        # True once at least one telemetry packet has been received.
        return self.ready
    def getState(self):
        '''
        Returns current vehicle state as an array of the form [time, gx, gy, gz, ax, ay, az, px, py, pz],
        where g=gyro; a=accelerometer; p=position.
        '''
        return self.state
    def setMotors(self, motorVals):
        '''
        Sets motor values between 0 and 1.
        '''
        # Copy so later caller-side mutation cannot race the send loop.
        self.motorVals = np.copy(motorVals)
    def _run(self):
        # Receive telemetry, publish it via self.state, and echo the
        # current motor commands back to the simulator.  A negative first
        # element (time) is the simulator's shutdown signal.
        while True:
            data, _ = self.telemSocket.recvfrom(80)
            self.state = np.frombuffer(data)
            self.ready = True
            if self.state[0] < 0:
                self.motorSocket.close()
                self.telemSocket.close()
                break
            self.motorSocket.sendto(np.ndarray.tobytes(self.motorVals), (self.host, self.motorPort))
|
main_window.py | import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
import asyncio
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet, InternalAddressCorruption
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword, send_exception_to_crash_reporter
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from electrum import blockchain
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from .i18n import _
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds, UnknownBaseUnit,
DECIMAL_POINT_DEFAULT)
class ElectrumWindow(App):
    """Main Kivy App object of the Electrum GUI (class continues below)."""
    # Electrum config object, injected via kwargs in __init__.
    electrum_config = ObjectProperty(None)
    # Two-letter UI language code; on_language() applies the translation.
    language = StringProperty('en')
    # properties might be updated by the network
    num_blocks = NumericProperty(0)
    num_nodes = NumericProperty(0)
    server_host = StringProperty('')
    server_port = StringProperty('')
    num_chains = NumericProperty(0)
    blockchain_name = StringProperty('')
    # Short fee summary string shown in the UI.
    fee_status = StringProperty('Fee')
    # Formatted wallet balance and its optional fiat equivalent.
    balance = StringProperty('')
    fiat_balance = StringProperty('')
    is_fiat = BooleanProperty(False)
    blockchain_forkpoint = NumericProperty(0)
    # Whether the network layer may pick servers automatically.
    auto_connect = BooleanProperty(False)
    def on_auto_connect(self, instance, x):
        # Push the toggled auto_connect flag into the network layer; the
        # network runs on its own thread, hence run_from_another_thread.
        net_params = self.network.get_parameters()
        net_params = net_params._replace(auto_connect=self.auto_connect)
        self.network.run_from_another_thread(self.network.set_parameters(net_params))
    def toggle_auto_connect(self, x):
        # UI callback: flip the property; on_auto_connect does the work.
        self.auto_connect = not self.auto_connect
    oneserver = BooleanProperty(False)
    def on_oneserver(self, instance, x):
        # Mirror the single-server preference into the network parameters.
        net_params = self.network.get_parameters()
        net_params = net_params._replace(oneserver=self.oneserver)
        self.network.run_from_another_thread(self.network.set_parameters(net_params))
    def toggle_oneserver(self, x):
        self.oneserver = not self.oneserver
    # Human-readable proxy summary shown in the UI.
    proxy_str = StringProperty('')
    def update_proxy_str(self, proxy: dict):
        # Format the proxy config as "host:port", or a localized "None".
        mode = proxy.get('mode')
        host = proxy.get('host')
        port = proxy.get('port')
        # NOTE(review): assumes `port` is already a string -- verify callers.
        self.proxy_str = (host + ':' + port) if mode else _('None')
    def choose_server_dialog(self, popup):
        # Let the user pick a server; fills the host/port fields of `popup`.
        from .uix.dialogs.choice_dialog import ChoiceDialog
        # Only the SSL protocol port is offered.
        protocol = 's'
        def cb2(host):
            from electrum import constants
            # Fall back to the network defaults when the host is unknown.
            # (`servers` is bound below; the closure runs only after it exists.)
            pp = servers.get(host, constants.net.DEFAULT_PORTS)
            port = pp.get(protocol, '')
            popup.ids.host.text = host
            popup.ids.port.text = port
        servers = self.network.get_servers()
        ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
    def choose_blockchain_dialog(self, dt):
        # Offer a chain choice only when multiple forks are being followed.
        from .uix.dialogs.choice_dialog import ChoiceDialog
        chains = self.network.get_blockchains()
        def cb(name):
            # Map the chosen display name back to its chain id and follow it.
            with blockchain.blockchains_lock: blockchain_items = list(blockchain.blockchains.items())
            for chain_id, b in blockchain_items:
                if name == b.get_name():
                    self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
        # Resolve chain ids to objects, dropping any that disappeared.
        chain_objects = [blockchain.blockchains.get(chain_id) for chain_id in chains]
        chain_objects = filter(lambda b: b is not None, chain_objects)
        names = [b.get_name() for b in chain_objects]
        if len(names) > 1:
            cur_chain = self.network.blockchain().get_name()
            ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
    use_rbf = BooleanProperty(False)
    def on_use_rbf(self, instance, x):
        # Persist the replace-by-fee preference to the config.
        self.electrum_config.set_key('use_rbf', self.use_rbf, True)
    use_change = BooleanProperty(False)
    def on_use_change(self, instance, x):
        # Persist the change-address usage preference.
        self.electrum_config.set_key('use_change', self.use_change, True)
    use_unconfirmed = BooleanProperty(False)
    def on_use_unconfirmed(self, instance, x):
        # Stored inverted: the config key is "confirmed_only".
        self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
    def set_URI(self, uri):
        # Open the Send tab pre-filled from a bitcoin: URI.
        self.switch_to('send')
        self.send_screen.set_URI(uri)
    def on_new_intent(self, intent):
        # Android intent handler: only bitcoin: URIs are accepted.
        if intent.getScheme() != 'bitcoin':
            return
        uri = intent.getDataString()
        self.set_URI(uri)
    def on_language(self, instance, language):
        # Switch translations when the language property changes.
        Logger.info('language: {}'.format(language))
        _.switch_lang(language)
    def update_history(self, *dt):
        # Refresh the history tab if it has been created.
        if self.history_screen:
            self.history_screen.update()
    def on_quotes(self, d):
        # Exchange-rate quotes changed: refresh status bar and history.
        Logger.info("on_quotes")
        self._trigger_update_status()
        self._trigger_update_history()
    def on_history(self, d):
        Logger.info("on_history")
        if self.wallet:
            # Cached per-coin fiat prices may be stale now.
            self.wallet.clear_coin_price_cache()
        self._trigger_update_history()
    def on_fee_histogram(self, *args):
        self._trigger_update_history()
    def _get_bu(self):
        # Getter for the base_unit alias property (e.g. "BTC", "mBTC").
        decimal_point = self.electrum_config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            return decimal_point_to_base_unit_name(decimal_point)
        except UnknownBaseUnit:
            # Config held an unsupported value; fall back to the default.
            return decimal_point_to_base_unit_name(DECIMAL_POINT_DEFAULT)
    def _set_bu(self, value):
        # Setter for base_unit: persist it and refresh displayed amounts.
        assert value in base_units.keys()
        decimal_point = base_unit_name_to_decimal_point(value)
        self.electrum_config.set_key('decimal_point', decimal_point, True)
        self._trigger_update_status()
        self._trigger_update_history()
    wallet_name = StringProperty(_('No Wallet'))
    base_unit = AliasProperty(_get_bu, _set_bu)
    # Fiat currency code; empty when fiat display is disabled.
    fiat_unit = StringProperty('')
    def on_fiat_unit(self, a, b):
        self._trigger_update_history()
    def decimal_point(self):
        # Number of decimal places implied by the current base unit.
        return base_units[self.base_unit]
    def btc_to_fiat(self, amount_str):
        # Convert a base-unit amount string to fiat at the current rate;
        # returns '' when conversion is impossible.
        if not amount_str:
            return ''
        if not self.fx.is_enabled():
            return ''
        rate = self.fx.exchange_rate()
        if rate.is_nan():
            # No exchange rate available yet.
            return ''
        fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
        return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
    def fiat_to_btc(self, fiat_amount):
        # Convert a fiat amount string back to a base-unit BTC string;
        # returns '' when no rate is available.
        if not fiat_amount:
            return ''
        rate = self.fx.exchange_rate()
        if rate.is_nan():
            return ''
        satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
        return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
    # Internal backing value for the read-only `orientation` alias below.
    _orientation = OptionProperty('landscape',
                                  options=('landscape', 'portrait'))
    def _get_orientation(self):
        return self._orientation
    orientation = AliasProperty(_get_orientation,
                                None,
                                bind=('_orientation',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`orientation` is a read only `AliasProperty` Defaults to 'landscape'
    '''
    # Internal backing value for the read-only `ui_mode` alias below.
    _ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
    def _get_ui_mode(self):
        return self._ui_mode
    ui_mode = AliasProperty(_get_ui_mode,
                            None,
                            bind=('_ui_mode',))
    '''Tries to ascertain the kind of device the app is running on.
    Can be one of `tablet` or `phone`.
    :data:`ui_mode` is a read only `AliasProperty` Defaults to 'phone'
    '''
    def __init__(self, **kwargs):
        # initialize variables
        self._clipboard = Clipboard
        self.info_bubble = None
        self.nfcscanner = None
        self.tabs = None
        self.is_exit = False
        self.wallet = None
        self.pause_time = 0
        self.asyncio_loop = asyncio.get_event_loop()
        App.__init__(self)#, **kwargs)
        title = _('Electrum App')
        self.electrum_config = config = kwargs.get('config', None)
        self.language = config.get('language', 'en')
        self.network = network = kwargs.get('network', None)  # type: Network
        # Seed the UI properties from the current network state.
        if self.network:
            self.num_blocks = self.network.get_local_height()
            self.num_nodes = len(self.network.get_interfaces())
            net_params = self.network.get_parameters()
            self.server_host = net_params.host
            self.server_port = net_params.port
            self.auto_connect = net_params.auto_connect
            self.oneserver = net_params.oneserver
            self.proxy_config = net_params.proxy if net_params.proxy else {}
            self.update_proxy_str(self.proxy_config)
        self.plugins = kwargs.get('plugins', [])
        self.gui_object = kwargs.get('gui_object', None)
        self.daemon = self.gui_object.daemon
        self.fx = self.daemon.fx
        # User preferences mirrored into Kivy properties.
        self.use_rbf = config.get('use_rbf', True)
        self.use_change = config.get('use_change', True)
        self.use_unconfirmed = not config.get('confirmed_only', False)
        # create triggers so as to minimize updating a max of 2 times a sec
        self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
        self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
        self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
        self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
        self._periodic_update_status_during_sync = Clock.schedule_interval(self.update_wallet_synchronizing_progress, .5)
        # cached dialogs
        self._settings_dialog = None
        self._password_dialog = None
        self.fee_status = self.electrum_config.get_fee_status()
def on_pr(self, pr):
    """Handle an incoming payment request `pr` (e.g. from a bitcoin: URI)."""
    if not self.wallet:
        self.show_error(_('No wallet loaded.'))
        return
    if pr.verify(self.wallet.contacts):
        # store it, then route the user depending on its status
        key = self.wallet.invoices.add(pr)
        if self.invoices_screen:
            self.invoices_screen.update()
        status = self.wallet.invoices.get_status(key)
        if status == PR_PAID:
            self.show_error("invoice already paid")
            self.send_screen.do_clear()
        else:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
            else:
                # open the send tab pre-filled with this request
                self.switch_to('send')
                self.send_screen.set_request(pr)
    else:
        self.show_error("invoice error:" + pr.error)
        self.send_screen.do_clear()
def on_qr(self, data):
    """Handle scanned QR content: an address, a bitcoin: URI, or a raw tx.

    Anything that fails to parse as a base43-encoded transaction falls
    through to an error bubble.
    """
    from electrum.bitcoin import base_decode, is_address
    data = data.strip()
    if is_address(data):
        self.set_URI(data)
        return
    if data.startswith('bitcoin:'):
        self.set_URI(data)
        return
    # try to decode transaction
    from electrum.transaction import Transaction
    from electrum.util import bh2u
    try:
        text = bh2u(base_decode(data, None, base=43))
        tx = Transaction(text)
        tx.deserialize()
    except Exception:
        # narrowed from bare `except:` -- don't swallow SystemExit/KeyboardInterrupt
        tx = None
    if tx:
        self.tx_dialog(tx)
        return
    # show error
    self.show_error("Unable to decode QR data")
def update_tab(self, name):
    """Refresh the screen named `<name>_screen`, if it has been created."""
    screen = getattr(self, name + '_screen', None)
    if screen is not None:
        screen.update()
@profiler
def update_tabs(self):
    """Refresh every main tab that currently exists."""
    for tab_name in ('invoices', 'send', 'history', 'receive', 'address'):
        self.update_tab(tab_name)
def switch_to(self, name):
    """Make the tab called `name` active, lazily loading its screen."""
    screen = getattr(self, name + '_screen', None)
    if screen is None:
        # screen not constructed yet: pull it out of the kv tree and load it
        screen = self.tabs.ids[name + '_screen']
        screen.load_screen()
    self.tabs.ids.panel.switch_to(self.tabs.ids[name + '_tab'])
def show_request(self, addr):
    """Open the receive tab pre-filled with address `addr`."""
    self.switch_to('receive')
    self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
    """Open the invoice popup describing payment request dict `req`."""
    from electrum.util import format_time
    popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
    popup.is_invoice = is_invoice
    popup.amount = req.get('amount')
    popup.requestor = req.get('requestor') if is_invoice else req.get('address')
    expiry = req.get('exp')
    popup.exp = format_time(expiry) if expiry else ''
    description = req.get('memo')
    popup.description = description if description else ''
    popup.signature = req.get('signature', '')
    popup.status = status
    funded = req.get('fund')
    popup.fund = funded if funded else 0
    popup.tx_hash = req.get('txid') or ''
    # outputs are only available once the popup is laid out
    popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
    popup.export = self.export_private_keys
    popup.open()
def show_addr_details(self, req, status):
    """Open the invoice popup in plain-address mode for request dict `req`."""
    from electrum.util import format_time
    popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
    popup.isaddr = 'y'
    popup.is_invoice = False
    popup.status = status
    popup.requestor = req.get('address')
    funded = req.get('fund')
    popup.fund = funded if funded else 0
    popup.export = self.export_private_keys
    popup.open()
def qr_dialog(self, title, data, show_text=False, text_for_clipboard=None):
    """Display `data` as a QR code; on render failure, fall back to the clipboard."""
    from .uix.dialogs.qr_dialog import QRDialog
    def on_qr_failure():
        popup.dismiss()
        msg = _('Failed to display QR code.')
        if text_for_clipboard:
            msg += '\n' + _('Text copied to clipboard.')
            self._clipboard.copy(text_for_clipboard)
        Clock.schedule_once(lambda dt: self.show_info(msg))
    popup = QRDialog(title, data, show_text,
                     failure_cb=on_qr_failure,
                     text_for_clipboard=text_for_clipboard)
    popup.open()
def scan_qr(self, on_complete):
    """Launch the Android QR scanner activity; pass the scanned text to `on_complete`.

    No-op on non-Android platforms.
    """
    if platform != 'android':
        return
    from jnius import autoclass, cast
    from android import activity
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
    Intent = autoclass('android.content.Intent')
    intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
    def on_qr_result(requestCode, resultCode, intent):
        try:
            if resultCode == -1:  # RESULT_OK:
                # this doesn't work due to some bug in jnius:
                # contents = intent.getStringExtra("text")
                String = autoclass("java.lang.String")
                contents = intent.getStringExtra(String("text"))
                on_complete(contents)
        except Exception as e:  # exc would otherwise get lost
            send_exception_to_crash_reporter(e)
        finally:
            # one-shot handler: unbind whether or not the scan succeeded
            activity.unbind(on_activity_result=on_qr_result)
    activity.bind(on_activity_result=on_qr_result)
    PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
    """Open the Android share chooser with `data` as plain text (no-op elsewhere)."""
    if platform != 'android':
        return
    from jnius import autoclass, cast
    String = autoclass('java.lang.String')
    Intent = autoclass('android.content.Intent')
    send_intent = Intent()
    send_intent.setAction(Intent.ACTION_SEND)
    send_intent.setType("text/plain")
    send_intent.putExtra(Intent.EXTRA_TEXT, String(data))
    PythonActivity = autoclass('org.kivy.android.PythonActivity')
    current_activity = cast('android.app.Activity', PythonActivity.mActivity)
    chooser = Intent.createChooser(send_intent, cast('java.lang.CharSequence', String(title)))
    current_activity.startActivity(chooser)
def build(self):
    """Kivy hook: return the root widget, loaded from the main kv file."""
    return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
    """On Android, send the activity to the background instead of exiting."""
    if platform != 'android':
        return
    # move activity to back
    from jnius import autoclass
    autoclass('org.kivy.android.PythonActivity').mActivity.moveTaskToBack(True)
def on_start(self):
    ''' This is the start point of the kivy ui
    '''
    import time
    # time.clock() was removed in Python 3.8; process_time() is the documented replacement
    Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
    win = Window
    win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
    win.bind(on_key_down=self.on_key_down)
    #win.softinput_mode = 'below_target'
    self.on_size(win, win.size)
    self.init_ui()
    crash_reporter.ExceptionHook(self)
    # init plugins
    run_hook('init_kivy', self)
    # fiat currency
    self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
    # default tab
    self.switch_to('history')
    # bind intent for bitcoin: URI scheme
    if platform == 'android':
        from android import activity
        from jnius import autoclass
        PythonActivity = autoclass('org.kivy.android.PythonActivity')
        mactivity = PythonActivity.mActivity
        self.on_new_intent(mactivity.getIntent())
        activity.bind(on_new_intent=self.on_new_intent)
    # connect callbacks
    if self.network:
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'status', 'new_transaction', 'verified']
        self.network.register_callback(self.on_network_event, interests)
        self.network.register_callback(self.on_fee, ['fee'])
        self.network.register_callback(self.on_fee_histogram, ['fee_histogram'])
        self.network.register_callback(self.on_quotes, ['on_quotes'])
        self.network.register_callback(self.on_history, ['on_history'])
    # load wallet
    self.load_wallet_by_name(self.electrum_config.get_wallet_path())
    # URI passed in config
    uri = self.electrum_config.get('url')
    if uri:
        self.set_URI(uri)
def get_wallet_path(self):
    """Path of the currently open wallet file, or '' when none is open."""
    return self.wallet.storage.path if self.wallet else ''
def on_wizard_complete(self, wizard, storage):
    """Install-wizard callback: open the new wallet, or fall back sensibly."""
    if storage:
        wallet = Wallet(storage)
        wallet.start_network(self.daemon.network)
        self.daemon.add_wallet(wallet)
        self.load_wallet(wallet)
        return
    if self.wallet:
        return
    # wizard did not return a wallet; and there is no wallet open atm
    # try to open last saved wallet (potentially start wizard again)
    self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
    """Open the wallet at `path`, prompting for a PIN or launching the install
    wizard as needed. No-op if `path` is empty or already open."""
    if not path:
        return
    if self.wallet and self.wallet.storage.path == path:
        return
    wallet = self.daemon.load_wallet(path, None)
    if wallet:
        if wallet.has_password():
            # PIN-protected: only attach the wallet after a correct PIN; quit on cancel
            self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
        else:
            self.load_wallet(wallet)
    else:
        def launch_wizard():
            # the daemon could not load the wallet: create/upgrade it interactively
            wizard = Factory.InstallWizard(self.electrum_config, self.plugins)
            wizard.path = path
            wizard.bind(on_wizard_complete=self.on_wizard_complete)
            storage = WalletStorage(path, manual_upgrades=True)
            if not storage.file_exists():
                wizard.run('new')
            elif storage.is_encrypted():
                raise Exception("Kivy GUI does not support encrypted wallet files.")
            elif storage.requires_upgrade():
                wizard.upgrade_storage(storage)
            else:
                raise Exception("unexpected storage file situation")
        if not ask_if_wizard:
            launch_wizard()
        else:
            from .uix.dialogs.question import Question
            def handle_answer(b: bool):
                if b:
                    launch_wizard()
                else:
                    # user declined: remove the file (best effort) and quit the app
                    try: os.unlink(path)
                    except FileNotFoundError: pass
                    self.stop()
            d = Question(_('Do you want to launch the wizard again?'), handle_answer)
            d.open()
def on_stop(self):
    """Kivy shutdown hook: remember which wallet was open, then close it."""
    Logger.info('on_stop')
    if not self.wallet:
        return
    self.electrum_config.save_last_wallet(self.wallet)
    self.stop_wallet()
def stop_wallet(self):
    """Detach and stop the current wallet, if any."""
    if not self.wallet:
        return
    self.daemon.stop_wallet(self.wallet.storage.path)
    self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
    """Handle ctrl-key shortcuts (quit, refresh, tab navigation placeholders)."""
    if 'ctrl' not in modifiers:
        return
    # q=24 w=25
    if keycode in (24, 25):
        self.stop()
    elif keycode == 27:
        # r=27
        # force update wallet
        self.update_wallet()
    elif keycode == 112:
        # pageup
        #TODO move to next tab
        pass
    elif keycode == 117:
        # pagedown
        #TODO move to prev tab
        pass
    #TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
    """Intercept the back button (esc) and the Android settings button."""
    if key == 27 and self.is_exit is False:
        # first back press: warn instead of exiting
        self.is_exit = True
        self.show_info(_('Press again to exit'))
        return True
    # override settings button
    if key in (319, 282):  #f1/settings button on android
        #self.gui.main_gui.toggle_settings(self)
        return True
def settings_dialog(self):
    """Show the settings dialog, creating it on first use."""
    from .uix.dialogs.settings import SettingsDialog
    dialog = self._settings_dialog
    if dialog is None:
        dialog = self._settings_dialog = SettingsDialog(self)
    dialog.update()
    dialog.open()
def popup_dialog(self, name):
    """Open the popup called `name`; a few names get dedicated dialogs,
    anything else is loaded from `<name>.kv`."""
    if name == 'settings':
        self.settings_dialog()
    elif name == 'wallets':
        from .uix.dialogs.wallets import WalletDialog
        d = WalletDialog()
        d.open()
    elif name == 'status':
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        master_public_keys_layout = popup.ids.master_public_keys
        # NOTE(review): the first key is skipped -- presumably shown by the kv
        # layout itself; verify against status.kv
        for xpub in self.wallet.get_master_public_keys()[1:]:
            master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
            ref = RefLabel()
            ref.name = _('Master Public Key')
            ref.data = xpub
            master_public_keys_layout.add_widget(ref)
        popup.open()
    else:
        popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
        popup.open()
@profiler
def init_ui(self):
    ''' Initialize The Ux part of electrum. This function performs the basic
    tasks of setting up the ui.
    '''
    #from weakref import ref
    self.funds_error = False
    # setup UX
    self.screens = {}
    #setup lazy imports for mainscreen
    Factory.register('AnimatedPopup',
                     module='electrum.gui.kivy.uix.dialogs')
    Factory.register('QRCodeWidget',
                     module='electrum.gui.kivy.uix.qrcodewidget')
    # preload widgets. Remove this if you want to load the widgets on demand
    #Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
    #Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
    # load and focus the ui
    self.root.manager = self.root.ids['manager']
    # screens are created lazily (see switch_to); start with none
    self.history_screen = None
    self.contacts_screen = None
    self.send_screen = None
    self.invoices_screen = None
    self.receive_screen = None
    self.requests_screen = None
    self.address_screen = None
    self.icon = "electrum/gui/icons/electrum.png"
    self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
    """Refresh network-related properties from the Network object (Clock trigger)."""
    net_params = self.network.get_parameters()
    self.num_nodes = len(self.network.get_interfaces())
    self.num_chains = len(self.network.get_blockchains())
    chain = self.network.blockchain()
    self.blockchain_forkpoint = chain.get_max_forkpoint()
    self.blockchain_name = chain.get_name()
    interface = self.network.interface
    if interface:
        self.server_host = interface.host
    else:
        # no interface yet: show the configured server with a hint
        self.server_host = str(net_params.host) + ' (connecting...)'
    self.proxy_config = net_params.proxy or {}
    self.update_proxy_str(self.proxy_config)
def on_network_event(self, event, *args):
    """Route network callbacks to the appropriate debounced UI triggers."""
    Logger.info('network event: '+ event)
    triggers = {
        'network_updated': (self._trigger_update_interfaces, self._trigger_update_status),
        'wallet_updated': (self._trigger_update_wallet, self._trigger_update_status),
        # blockchain_updated: refresh confirmation counts in history
        'blockchain_updated': (self._trigger_update_wallet,),
        'status': (self._trigger_update_status,),
        'new_transaction': (self._trigger_update_wallet,),
        'verified': (self._trigger_update_wallet,),
    }
    for trigger in triggers.get(event, ()):
        trigger()
@profiler
def load_wallet(self, wallet):
    """Make `wallet` the active wallet and refresh the whole UI."""
    if self.wallet:
        self.stop_wallet()
    self.wallet = wallet
    self.wallet_name = wallet.basename()
    self.update_wallet()
    # Once GUI has been initialized check if we want to announce something
    # since the callback has been called before the GUI was initialized
    if self.receive_screen:
        self.receive_screen.clear()
        self.update_tabs()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        # surface the problem but keep the wallet open
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def update_status(self, *dt):
    """Recompute the balance / sync status shown in the header (Clock trigger)."""
    if not self.wallet:
        return
    if self.network is None or not self.network.is_connected():
        status = _("Offline")
    elif self.network.is_connected():
        self.num_blocks = self.network.get_local_height()
        server_height = self.network.get_server_height()
        server_lag = self.num_blocks - server_height
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            status = ("{} [size=18dp]({}/{})[/size]"
                      .format(_("Synchronizing..."), num_answered, num_sent))
        elif server_lag > 1:
            status = _("Server is lagging ({} blocks)").format(server_lag)
        else:
            # fully synced: empty status makes the balance show instead
            status = ''
    else:
        # NOTE(review): unreachable -- the first branch already covers every
        # not-connected case, so is_connected() is always True here
        status = _("Disconnected")
    if status:
        self.balance = status
        self.fiat_balance = status
    else:
        # c, u, x -- presumably confirmed/unconfirmed/unmatured; verify in get_balance
        c, u, x = self.wallet.get_balance()
        text = self.format_amount(c+x+u)
        self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
        self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def update_wallet_synchronizing_progress(self, *dt):
    """Periodic tick: while the wallet is syncing, keep the status line fresh."""
    if self.wallet and not self.wallet.up_to_date:
        self._trigger_update_status()
def get_max_amount(self):
    """Return the maximum spendable amount as a formatted string, or '' on failure."""
    from electrum.transaction import TxOutput
    if run_hook('abort_send', self):
        return ''
    inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
    if not inputs:
        return ''
    addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
    # '!' -- presumably the "send all" sentinel understood by the tx builder
    outputs = [TxOutput(TYPE_ADDRESS, addr, '!')]
    try:
        tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
    except NoDynamicFeeEstimates as e:
        # show the error on the UI thread; bind e now, it is gone after the except block
        Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
        return ''
    except NotEnoughFunds:
        return ''
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
        return ''
    amount = tx.output_value()
    # subtract any extra fee a plugin adds on top of the miner fee
    __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
    amount_after_all_fees = amount - x_fee_amount
    return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
    """Render satoshi amount `x` using the configured decimal point."""
    return format_satoshis(x, 0, self.decimal_point(),
                           is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
    """Render satoshi amount `x` with the base unit suffix appended."""
    return '{} {}'.format(format_satoshis_plain(x, self.decimal_point()), self.base_unit)
#@profiler
def update_wallet(self, *dt):
    """Refresh the status line and, when it is safe to do so, all tab contents."""
    self._trigger_update_status()
    if not self.wallet:
        return
    if self.wallet.up_to_date or not self.network or not self.network.is_connected():
        self.update_tabs()
def notify(self, message):
    """Show a system notification via plyer, if it is installed."""
    try:
        global notification, os
        if not notification:
            from plyer import notification
        icon = (os.path.dirname(os.path.realpath(__file__))
                + '/../../' + self.icon)
        notification.notify('Electrum', message,
                            app_icon=icon, app_name='Electrum')
    except ImportError:
        # fixed: Logger.Error is not an attribute of a logging.Logger and
        # raised AttributeError inside the handler; the method is .error
        Logger.error('Notification: needs plyer; `sudo python3 -m pip install plyer`')
def on_pause(self):
    """App going to background: record when, and disable NFC."""
    self.pause_time = time.time()
    # pause nfc
    if self.nfcscanner:
        self.nfcscanner.nfc_disable()
    return True
def on_resume(self):
    """App returning to foreground: re-lock after 60s away, re-enable NFC."""
    if self.wallet and self.wallet.has_password() and time.time() - self.pause_time > 60:
        self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
    if self.nfcscanner:
        self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
    """Track orientation and phone/tablet mode from the window size."""
    w, h = value
    self._orientation = 'landscape' if w > h else 'portrait'
    # 3.51 inch on the short edge is the phone/tablet threshold
    self._ui_mode = 'tablet' if min(w, h) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
    """First tap copies the label's data; a second tap shows it as a QR code."""
    if not label.touched:
        label.touched = True
        self._clipboard.copy(label.data)
        Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
    else:
        label.touched = False
        self.qr_dialog(label.name, label.data, True)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
               exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
               modal=False):
    '''Show an error Message Bubble.
    '''
    self.show_info_bubble(text=error, icon=icon, width=width,
                          pos=pos if pos else Window.center,
                          arrow_pos=arrow_pos, exit=exit,
                          duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
              exit=False, duration=0, modal=False):
    '''Show an Info Message Bubble.
    '''
    # same as show_error, but with the "important" icon
    self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
                    duration=duration, modal=modal, exit=exit, pos=pos,
                    arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
                     arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
    '''Method to show an Information Bubble
    .. parameters::
        text: Message to be displayed; the sentinel 'texture' means `icon`
              holds a texture to display full screen instead of a message
        pos: position for the bubble
        duration: duration the bubble remains on screen. 0 = click to hide
        width: width of the Bubble
        arrow_pos: arrow position for the bubble
    '''
    # a single bubble instance is cached and reused across calls
    info_bubble = self.info_bubble
    if not info_bubble:
        info_bubble = self.info_bubble = Factory.InfoBubble()
    win = Window
    if info_bubble.parent:
        # currently shown: detach (the modal view when modal) before re-showing
        win.remove_widget(info_bubble
                          if not info_bubble.modal else
                          info_bubble._modal_view)
    if not arrow_pos:
        info_bubble.show_arrow = False
    else:
        info_bubble.show_arrow = True
        info_bubble.arrow_pos = arrow_pos
    img = info_bubble.ids.img
    if text == 'texture':
        # icon holds a texture not a source image
        # display the texture in full screen
        text = ''
        img.texture = icon
        info_bubble.fs = True
        info_bubble.show_arrow = False
        img.allow_stretch = True
        info_bubble.dim_background = True
        info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
    else:
        info_bubble.fs = False
        info_bubble.icon = icon
        #if img.texture and img._coreimage:
        #    img.reload()
        img.allow_stretch = False
        info_bubble.dim_background = False
        info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
    info_bubble.message = text
    if not pos:
        # default: horizontally centered, just below the window center
        pos = (win.center[0], win.center[1] - (info_bubble.height/2))
    info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
    """Open a dialog showing transaction `tx`."""
    from .uix.dialogs.tx_dialog import TxDialog
    TxDialog(self, tx).open()
def sign_tx(self, *args):
    """Sign a transaction on a background thread (see _sign_tx for args)."""
    threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
    """Worker for sign_tx; reports back on the UI thread via Clock."""
    try:
        self.wallet.sign_transaction(tx, password)
    except InvalidPassword:
        Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
        return
    # a plugin may wrap the success callback (e.g. for 2FA co-signing)
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
    """Worker: broadcast `tx` and deliver (status, msg) on the UI thread."""
    ok = False
    try:
        self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
    except TxBroadcastError as e:
        msg = e.get_message_for_gui()
    except BestEffortRequestFailed as e:
        msg = repr(e)
    else:
        ok, msg = True, tx.txid()
    Clock.schedule_once(lambda dt: on_complete(ok, msg))
def broadcast(self, tx, pr=None):
    """Broadcast `tx`; when `pr` is given, mark that invoice paid on success."""
    def on_complete(ok, msg):
        if not ok:
            self.show_error(msg or '')
            return
        self.show_info(_('Payment sent.'))
        if self.send_screen:
            self.send_screen.do_clear()
        if pr:
            self.wallet.invoices.set_paid(pr, tx.txid())
            self.wallet.invoices.save()
            self.update_tab('invoices')
    if self.network and self.network.is_connected():
        self.show_info(_('Sending'))
        threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
    else:
        self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
    """Let the user edit the description label of `screen`."""
    from .uix.dialogs.label_dialog import LabelDialog
    def on_text(value):
        screen.message = value
    LabelDialog(_('Enter description'), screen.message, on_text).open()
def amount_dialog(self, screen, show_max):
    """Open the amount-entry dialog for `screen`."""
    from .uix.dialogs.amount_dialog import AmountDialog
    amount = screen.amount
    if amount:
        # strip the unit suffix, e.g. "0.1 mBTC" -> "0.1"
        amount, unit = str(amount).split()
        assert unit == self.base_unit
    def on_amount(value):
        screen.amount = value
    AmountDialog(show_max, amount, on_amount).open()
def invoices_dialog(self, screen):
    """Show the saved-invoices dialog, or an explanation when none exist."""
    from .uix.dialogs.invoices import InvoicesDialog
    if not self.wallet.invoices.sorted_list():
        self.show_info(' '.join([
            _('No saved invoices.'),
            _('Signed invoices are saved automatically when you scan them.'),
            _('You may also save unsigned requests or contact addresses using the save button.')
        ]))
        return
    dialog = InvoicesDialog(self, screen, None)
    dialog.update()
    dialog.open()
def requests_dialog(self, screen):
    """Show the saved payment requests, or an info message when none exist."""
    from .uix.dialogs.requests import RequestsDialog
    if not self.wallet.get_sorted_requests(self.electrum_config):
        self.show_info(_('No saved requests.'))
        return
    dialog = RequestsDialog(self, screen, None)
    dialog.update()
    dialog.open()
def addresses_dialog(self, screen):
    """Open the address-list dialog."""
    from .uix.dialogs.addresses import AddressesDialog
    dialog = AddressesDialog(self, screen, None)
    dialog.update()
    dialog.open()
def fee_dialog(self, label, dt):
    """Open the fee-settings dialog; refresh the fee status when it closes."""
    from .uix.dialogs.fee_dialog import FeeDialog
    def on_closed():
        self.fee_status = self.electrum_config.get_fee_status()
    FeeDialog(self, self.electrum_config, on_closed).open()
def on_fee(self, event, *arg):
    """Network 'fee' callback: refresh the cached fee status string."""
    self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
    """Run f(*args, password), asking for the PIN first when the wallet has one."""
    if not self.wallet.has_password():
        f(*(args + (None,)))
        return
    on_success = lambda pw: f(*(args + (pw,)))
    self.password_dialog(self.wallet, msg, on_success, lambda: None)
def delete_wallet(self):
    """Ask for confirmation before deleting the current wallet."""
    from .uix.dialogs.question import Question
    basename = os.path.basename(self.wallet.storage.path)
    Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet).open()
def _delete_wallet(self, b):
    """Confirmation callback: require the PIN, then really delete."""
    if not b:
        return
    basename = self.wallet.basename()
    self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
    """Delete the current wallet file after verifying PIN `pw`, then open the default wallet."""
    wallet_path = self.get_wallet_path()
    basename = os.path.basename(wallet_path)
    if self.wallet.has_password():
        try:
            self.wallet.check_password(pw)
        except InvalidPassword:
            # narrowed from bare `except:` -- consistent with the other PIN checks in this file
            self.show_error("Invalid PIN")
            return
    self.stop_wallet()
    os.unlink(wallet_path)
    self.show_error(_("Wallet removed: {}").format(basename))
    # fall back to the configured default wallet (may launch the wizard)
    new_path = self.electrum_config.get_wallet_path()
    self.load_wallet_by_name(new_path)
def show_seed(self, label):
    """Prompt for the PIN, then reveal the wallet seed in `label`."""
    self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
    """Decrypt the seed (and passphrase, if any) with `password` and show it in `label`."""
    if self.wallet.has_password() and password is None:
        # PIN entry was cancelled
        return
    keystore = self.wallet.keystore
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except InvalidPassword:
        # narrowed from bare `except:` -- consistent with the other PIN checks in this file
        self.show_error("Invalid PIN")
        return
    label.text = _('Seed') + ':\n' + seed
    if passphrase:
        label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
    """Open the (cached) PIN entry dialog."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    dialog = self._password_dialog
    dialog.init(self, wallet, msg, on_success, on_failure)
    dialog.open()
def change_password(self, cb):
    """Let the user change the wallet PIN via the password dialog."""
    from .uix.dialogs.password_dialog import PasswordDialog
    if self._password_dialog is None:
        self._password_dialog = PasswordDialog()
    message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
    def on_success(old_password, new_password):
        self.wallet.update_password(old_password, new_password)
        self.show_info(_("Your PIN code was updated"))
    def on_failure():
        self.show_error(_("PIN codes do not match"))
    self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
    self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
    """After PIN entry, put the private key for `addr` into `pk_label`."""
    if self.wallet.is_watching_only():
        self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
        return
    def show_private_key(addr, pk_label, password):
        if self.wallet.has_password() and password is None:
            return  # PIN entry was cancelled
        if not self.wallet.can_export():
            return
        try:
            pk_label.data = str(self.wallet.export_private_key(addr, password)[0])
        except InvalidPassword:
            self.show_error("Invalid PIN")
            return
    self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
materialized_views_test.py | import collections
import re
import sys
import time
import traceback
import pytest
import threading
import logging
from flaky import flaky
from enum import Enum
from queue import Empty
from functools import partial
from multiprocessing import Process, Queue
from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
from cassandra.cluster import NoHostAvailable
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from distutils.version import LooseVersion
from dtest import Tester, get_ip_from_node, create_ks, mk_bman_path
from tools.assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from tools.data import rows_to_list
from tools.misc import new_node
from tools.jmxutils import (JolokiaAgent, make_mbean)
# shorthand for the dtest version marker decorator
since = pytest.mark.since
logger = logging.getLogger(__name__)
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@flaky
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def _rows_to_list(self, rows):
    """Convert an iterable of row tuples into a list of plain lists."""
    return [list(row) for row in rows]
def prepare(self, user_table=False, rf=1, options=None, nodes=3, install_byteman=False, **kwargs):
    """Start a cluster with materialized views enabled and return a session on node1.

    When `user_table` is True, also create the `users` table and its
    `users_by_state` materialized view in keyspace 'ks'.
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    cluster.populate([nodes, 0], install_byteman=install_byteman)
    if options:
        cluster.set_configuration_options(values=options)
    cluster.start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1, **kwargs)
    create_ks(session, 'ks', rf)
    if user_table:
        session.execute(
            ("CREATE TABLE users (username varchar, password varchar, gender varchar, "
             "session_token varchar, state varchar, birth_year bigint, "
             "PRIMARY KEY (username));")
        )
        # create a materialized view
        session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
                         "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                         "PRIMARY KEY (state, username)"))
    return session
def update_view(self, session, query, flush, compact=False):
    """Execute `query`, replay batchlogs, then optionally flush/compact the cluster."""
    session.execute(query)
    self._replay_batchlogs()
    if flush:
        self.cluster.flush()
    if compact:
        self.cluster.compact()
def _settle_nodes(self):
    """Replay batchlogs and wait until every thread pool is idle on all running nodes."""
    logger.debug("Settling all nodes")
    # matches one `nodetool tpstats` pool line: name, active, pending, completed, blocked, all-time-blocked
    stage_match = re.compile(r"(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")
    def _settled_stages(node):
        # settled = no pool has active or pending tasks
        (stdout, stderr, rc) = node.nodetool("tpstats")
        lines = re.split("\n+", stdout)
        for line in lines:
            match = stage_match.match(line)
            if match is not None:
                active = int(match.group('active'))
                pending = int(match.group('pending'))
                if active != 0 or pending != 0:
                    logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
                    return False
        return True
    for node in self.cluster.nodelist():
        if node.is_running():
            node.nodetool("replaybatchlog")
            attempts = 50  # 100 milliseconds per attempt, so 5 seconds total
            while attempts > 0 and not _settled_stages(node):
                time.sleep(0.1)
                attempts -= 1
def _build_progress_table(self):
    """Name of the system table tracking in-progress view builds (renamed in 4.0)."""
    if self.cluster.version() >= '4':
        return 'system.view_builds_in_progress'
    return 'system.views_builds_in_progress'
def _wait_for_view(self, ks, view):
    """Block until the build of view `ks`.`view` has finished on every running node."""
    logger.debug("waiting for view")
    def _view_build_finished(node):
        # build is finished once its progress row disappears from the system table
        s = self.patient_exclusive_cql_connection(node)
        query = "SELECT * FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                (self._build_progress_table(), ks, view)
        result = list(s.execute(query))
        return len(result) == 0
    for node in self.cluster.nodelist():
        if node.is_running():
            attempts = 50  # 1 sec per attempt, so 50 seconds total
            while attempts > 0 and not _view_build_finished(node):
                time.sleep(1)
                attempts -= 1
            if attempts <= 0:
                raise RuntimeError("View {}.{} build not finished after 50 seconds.".format(ks, view))
def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
    """Wait for the start of a MV build, ensuring that it has saved some progress"""
    start = time.time()
    while True:
        try:
            query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                    (self._build_progress_table(), ks, view)
            result = list(session.execute(query))
            # still zero rows -> build has not saved progress yet; keep waiting
            assert 0 == result[0].count
        except AssertionError:
            break
        elapsed = (time.time() - start) / 60
        if elapsed > wait_minutes:
            # fixed: the message previously hard-coded "2 minutes", ignoring wait_minutes
            pytest.fail("The MV build hasn't started in {} minutes.".format(wait_minutes))
        # fixed: previously a busy-wait; throttle the polling
        time.sleep(0.1)
def _insert_data(self, session):
    """Insert the four standard test users, then wait for the cluster to settle."""
    prefix = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
    for values in ("('user1', 'ch@ngem3a', 'f', 'TX', 1968);",
                   "('user2', 'ch@ngem3b', 'm', 'CA', 1971);",
                   "('user3', 'ch@ngem3c', 'f', 'FL', 1978);",
                   "('user4', 'ch@ngem3d', 'm', 'TX', 1974);"):
        session.execute(prefix + values)
    self._settle_nodes()
def _replay_batchlogs(self):
    """Force batchlog replay on every running node and verify the batchlog drains."""
    for node in self.cluster.nodelist():
        if node.is_running():
            logger.debug("Replaying batchlog on node {}".format(node.name))
            node.nodetool("replaybatchlog")
            # CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
            node_session = self.patient_exclusive_cql_connection(node)
            result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
            assert result[0].count == 0
def _assert_view_meta(self, session, views, exists=True, nodes=2):
    """Check that the system tables do (exists=True) or don't reflect `views` built views."""
    if exists:
        assert_one(session, "SELECT COUNT(*) FROM system.built_views", [views])
        if self.cluster.version() >= '3.11':
            # view_build_status has one row per (view, node)
            assert_one(session, "SELECT COUNT(*) FROM system_distributed.view_build_status", [views * nodes])
    else:
        assert_none(session, "SELECT * FROM system.built_views")
        if self.cluster.version() >= '3.11':
            assert_none(session, "SELECT * FROM system_distributed.view_build_status")
    # in either case no build should be in progress
    assert_none(session, "SELECT * FROM {}".format(self._build_progress_table()))
def test_view_metadata_cleanup(self):
    """
    drop keyspace or view should clear built_views and view_build_status
    """
    session = self.prepare(rf=2, nodes=2)
    def populate_data(session, rows):
        logger.debug("populate base data")
        for v in range(rows):
            session.execute("INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})".format(v=v))
    def verify_data(session, rows, views):
        logger.debug("verify view data")
        for v in range(rows):
            for view in range(views):
                assert_one(session, "SELECT * FROM mv{} WHERE k={v} AND c={v}".format(view, v=v), [v, v, v, v, v, v])
    def create_keyspace(session, ks="ks1", rf=2):
        create_ks(session, ks, rf)
    def create_table(session):
        logger.debug("create base table")
        session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
    def create_views(session, views, keyspace="ks1"):
        logger.debug("create view")
        for view in range(views):
            session.execute("CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t "
                            "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)".format(view),
                            timeout=60)
            self._wait_for_view(keyspace, "mv{}".format(view))
    def drop_keyspace(session, keyspace="ks1"):
        logger.debug("drop keyspace {}".format(keyspace))
        session.execute("DROP KEYSPACE IF EXISTS {}".format(keyspace),
                        timeout=60)
    def drop_views(session, views):
        logger.debug("drop all views")
        for view in range(views):
            session.execute("DROP MATERIALIZED VIEW IF EXISTS mv{}".format(view))
    rows = 100
    views = 5
    # phase 1: dropping the whole keyspace must clear the view metadata
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)
    self._assert_view_meta(session, views)
    drop_keyspace(session)
    self._assert_view_meta(session, views, exists=False)
    # phase 2: dropping only the views must clear the view metadata too
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)
    self._assert_view_meta(session, views)
    drop_views(session, views)
    self._assert_view_meta(session, views, exists=False)
def test_create(self):
    """Test the materialized view creation.

    self.prepare(user_table=True) creates the 'users' table together with a
    'users_by_state' view, so exactly one view row must be visible in
    system_schema.views for that base table.
    """
    session = self.prepare(user_table=True)
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    # Fix: the original failure message read "Expecting 1 materialized
    # view == got..." — a comma/operator mix-up that produced a garbled
    # message. Report the actual result cleanly instead.
    assert len(result) == 1, "Expecting 1 materialized view, got " + str(result)
def test_gcgs_validation(self):
    """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
    session = self.prepare(user_table=True)

    # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
    assert_invalid(session,
                   "ALTER TABLE users WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of the base table of a materialized view "
                   "to 0, since this value is used to TTL undelivered updates. Setting "
                   "gc_grace_seconds too low might cause undelivered updates to expire "
                   "before being replayed.")

    # But can alter the gc_grace_seconds of the base table to a value != 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")

    # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
    assert_invalid(session,
                   "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                   "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                   "too low might cause undelivered updates to expire before being replayed.")

    # Now let's drop MV
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    # Now we should be able to set the gc_grace_seconds of the base table to 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")

    # Now we shouldn't be able to create a new MV on this table
    assert_invalid(session,
                   "CREATE MATERIALIZED VIEW users_by_state AS "
                   "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                   "PRIMARY KEY (state, username)",
                   "Cannot create materialized view 'users_by_state' for base table 'users' "
                   "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                   "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                   " to expire before being replayed.")
def test_insert(self):
    """Test basic insertions.

    _insert_data presumably inserts the four fixed users used throughout this
    suite (two in TX, one in CA, one in FL); verify the base table and the
    users_by_state view agree with that distribution.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    # Fix: the original messages used `.format(4 == len(result))`, passing a
    # single boolean for two placeholders — so a failing assert would raise
    # IndexError while building its own message. Pass both values.
    result = list(session.execute("SELECT * FROM users;"))
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
    result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
    result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
    result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_populate_mv_after_insert(self):
    """Test that a view is OK when created with existing data"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    # Insert the data BEFORE creating the view so the view build has to
    # backfill all 1000 existing rows.
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")
    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()
    # Every base row must now be visible through the view.
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
@pytest.mark.xfail(reason="Should be addressed with CASSANDRA-15845")
@since('4.0')
def test_populate_mv_after_insert_wide_rows_version40(self):
    # 4.0+ variant of the wide-rows test below; currently expected to fail
    # until CASSANDRA-15845 is resolved, hence the xfail marker.
    self.test_populate_mv_after_insert_wide_rows()
@since('3.0', max_version='3.X')
def test_populate_mv_after_insert_wide_rows(self):
    """Test that a view is OK when created with existing data with wide rows"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
    # 5 partitions x 10000 clustering rows each, written before the view
    # exists so the build has to backfill wide partitions.
    for i in range(5):
        for j in range(10000):
            session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")
    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()
    # The view swaps the key order, so each (i, j) base row appears as [j, i].
    for i in range(5):
        for j in range(10000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
def test_crc_check_chance(self):
    """Test that crc_check_chance parameter is properly populated after mv creation and update"""
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    # Set crc_check_chance at view creation time ...
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
    assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
    # ... and verify that ALTER MATERIALIZED VIEW updates it as well.
    session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
    assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def test_prepared_statement(self):
    """Test basic insertions with prepared statement"""
    session = self.prepare(user_table=True)
    # Prepared write against the base table and prepared read against the view.
    insertPrepared = session.prepare(
        "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
    )
    selectPrepared = session.prepare(
        "SELECT state, password, session_token FROM users_by_state WHERE state=?;"
    )

    # insert data
    session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
    session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
    session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
    session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))

    result = list(session.execute("SELECT * FROM users;"))
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
    # Per-state counts through the view: 2 in TX, 1 in CA, none in MA.
    result = list(session.execute(selectPrepared.bind(['TX'])))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
    result = list(session.execute(selectPrepared.bind(['CA'])))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
    result = list(session.execute(selectPrepared.bind(['MA'])))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_immutable(self):
    """Test that a materialized view is immutable"""
    session = self.prepare(user_table=True)

    # cannot insert
    assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
                   "Cannot directly modify a materialized view")

    # cannot update
    assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a row
    assert_invalid(session, "DELETE from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a cell
    assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot alter a table
    assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                   "Cannot use ALTER TABLE on Materialized View")
def test_drop_mv(self):
    """Test that we can drop a view properly"""
    session = self.prepare(user_table=True)

    # create another materialized view (prepare() already created users_by_state)
    session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
                     "SELECT * FROM users WHERE birth_year IS NOT NULL AND "
                     "username IS NOT NULL PRIMARY KEY (birth_year, username)"))

    # Two views should now be registered for the base table.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 2, "Expecting {} materialized view, got {}".format(2, len(result))

    # Dropping one view must leave exactly one behind.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
def test_drop_column(self):
    """Test that we cannot drop a column if it is used by a MV"""
    session = self.prepare(user_table=True)
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
    # 'state' is part of the users_by_state view key, so dropping it must fail.
    assert_invalid(
        session,
        "ALTER TABLE ks.users DROP state;",
        "Cannot drop column state on base table with materialized views."
    )
def test_drop_table(self):
    """Test that we cannot drop a table without deleting its MVs first.

    The DROP TABLE must be rejected while users_by_state exists, and succeed
    once the view has been dropped; system_schema.views must reflect each step.
    """
    session = self.prepare(user_table=True)
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # Dropping the base table while a view depends on it must be rejected ...
    assert_invalid(
        session,
        "DROP TABLE ks.users;",
        "Cannot drop table when materialized views still depend on it"
    )
    # ... and the view must still be registered afterwards.
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # After dropping the view, dropping the table must succeed and leave no views.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    session.execute("DROP TABLE ks.users;")
    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    # Fix: the original message was formatted with 1 although the assertion
    # expects 0 remaining views — report the expected count correctly.
    assert len(result) == 0, "Expecting {} materialized view, got {}".format(0, len(result))
def test_clustering_column(self):
    """Test that we can use clustering columns as primary key for a materialized view"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
    # Base table where 'state' and 'birth_year' are clustering columns.
    session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                     "session_token varchar, state varchar, birth_year bigint, "
                     "PRIMARY KEY (username, state, birth_year));"))

    # create a materialized view that use a compound key
    session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
                     "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
                     "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))

    session.cluster.control_connection.wait_for_schema_agreement()
    self._insert_data(session)

    # Query by the view's partition key alone, then by partition + clustering key.
    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
def _add_dc_after_mv_test(self, rf, nts):
    """
    @jira_ticket CASSANDRA-10978
    Add datacenter with configurable replication.

    `rf` is passed straight to prepare(); `nts` selects whether the keyspaces
    are switched to NetworkTopologyStrategy (and the new-dc nodes rebuilt)
    after the nodes join.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating schema")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Writing 1k to base")
    # v is the negated id so base and view keys are disjoint value ranges.
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Reading 1k from view")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Reading 1k from base")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

    logger.debug("Bootstrapping new node in another dc")
    node4 = new_node(self.cluster, data_center='dc2')
    # MIGRATION_WAIT presumably gives the joining node time to pull schema.
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    logger.debug("Bootstrapping new node in another dc")
    node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
    node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
    if nts:
        # Switch to NTS with one replica per dc and stream the existing data
        # into the new dc via nodetool rebuild.
        session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        node4.nodetool('rebuild dc1')
        node5.nodetool('rebuild dc1')

    cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE
    session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)

    logger.debug("Verifying data from new node in view")
    for i in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Inserting 100 into base")
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Verify 100 in view")
    for i in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
    """
    # rf=1, keep SimpleStrategy (nts=False).
    self._add_dc_after_mv_test(1, False)
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
    """
    # Per-dc rf dict, switch to NTS after the new dc joins (nts=True).
    self._add_dc_after_mv_test({'dc1': 1}, True)
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    # Bootstrap a fourth node into the same dc and query it directly.
    node4 = new_node(self.cluster, data_center="dc1")
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    """
    @jira_ticket CASSANDRA-12984
    Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again
    """
    assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])

    for i in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    # Writes after the bootstrap must also be reflected in the view.
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_insert_during_range_movement_rf1(self):
    # rf=1 variant of the range-movement insert test.
    self._base_test_insert_during_range_movement(rf=1)
def test_insert_during_range_movement_rf2(self):
    # rf=2 variant of the range-movement insert test.
    self._base_test_insert_during_range_movement(rf=2)
def test_insert_during_range_movement_rf3(self):
    # rf=3 variant of the range-movement insert test.
    self._base_test_insert_during_range_movement(rf=3)
def _base_test_insert_during_range_movement(self, rf):
    """
    @jira_ticket CASSANDRA-14251
    Test that materialized views replication work in the middle of a join
    for different replication factors.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating table and view")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Starting new node4 in write survey mode")
    node4 = new_node(self.cluster, data_center="dc1")
    # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true",
                                                      "-Dcassandra.batchlog.replay_timeout_in_ms=1"])

    logger.debug("Insert data while node4 is joining")
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Finish joining node4")
    node4.nodetool("join")

    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()

    # All writes made during the join must be visible through the view.
    logger.debug("Verify data")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with wide materialized views as expected when adding a node.

    Writes 10 partitions x 100 rows, range-deletes two rows out of every ten,
    then bootstraps a new node and checks that base and view agree both on
    the old node and on the new one, before and after further writes.
    """
    session = self.prepare()

    # Compaction disabled so the data layout stays as written (range
    # tombstones are not compacted away before the bootstrap).
    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in range(10):
        for j in range(100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    self.cluster.flush()

    for i in range(10):
        for j in range(100):
            assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Range-delete v in [j, j+2) for every multiple of 10, i.e. rows j and j+1.
    for i in range(10):
        for j in range(100):
            if j % 10 == 0:
                session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(i, j, j + 2))

    self.cluster.flush()

    # Deleted rows (j % 10 in {0, 1}) must be gone from base and view alike.
    for i in range(10):
        for j in range(100):
            if j % 10 == 0 or (j - 1) % 10 == 0:
                assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j))
                assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    node4 = new_node(self.cluster, data_center="dc1")
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670

    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    # The streamed replica on the new node must show the same picture.
    for i in range(10):
        for j in range(100):
            if j % 10 == 0 or (j - 1) % 10 == 0:
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Post-bootstrap writes (v in [100, 110)) must also propagate.
    for i in range(10):
        for j in range(100, 110):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    for i in range(10):
        for j in range(110):
            if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with very wide materialized views as expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # 5 partitions x 5000 rows each — very wide view partitions.
    for i in range(5):
        for j in range(5000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    self.cluster.flush()

    for i in range(5):
        for j in range(5000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    node4 = new_node(self.cluster, data_center="dc1")
    # Force small mutations so wide partitions are streamed in many pieces.
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670

    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    # The new node must serve the full view contents.
    for i in range(5):
        for j in range(5000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])

    # Write again (re-inserting 0..4999 and adding 5000..5099) and re-verify.
    for i in range(5):
        for j in range(5100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))

    for i in range(5):
        for j in range(5100):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10621
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node in write survey mode.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    # Bring up a node in write-survey mode (receives writes, serves no reads).
    node4 = new_node(self.cluster, data_center="dc1")
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    # Writes made while the survey node is up, plus the original rows, must
    # all be readable through the view.
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    for i in range(1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_allow_filtering(self):
    """Test that allow filtering works as usual for a materialized view"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    for i in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))

    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])

    rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
    assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))

    # Filtering on a non-key column without ALLOW FILTERING must be rejected.
    assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
    assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")

    # With ALLOW FILTERING the same queries must succeed on both views.
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
            [i, i, 'a', 3.0]
        )
        assert_one(
            session,
            "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
            ['a', i, i, 3.0]
        )
def test_secondary_index(self):
    """Test that secondary indexes cannot be created on a materialized view"""
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    # CREATE INDEX must target a table, never a view.
    assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
                   "Secondary indexes are not supported on materialized views")
def test_ttl(self):
    """
    Test that TTL works as expected for a materialized view
    @expected_result The TTL is propagated properly between tables.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    # Write every row with a 10-second TTL.
    for i in range(100):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))

    # Rows are visible in the view before the TTL expires ...
    for i in range(100):
        assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])

    # ... and gone from the view after it (sleep well past the 10s TTL).
    time.sleep(20)

    rows = list(session.execute("SELECT * FROM t_by_v2"))
    assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows))
def test_query_all_new_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
    @expected_result The new column is present in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Add a column on the base table; a SELECT * view must pick it up.
    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
    assert len(results) == 1
    assert hasattr(results[0], 'first_name'), 'Column "first_name" not found'
    # The new column is unset for the existing row, hence the extra None.
    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
    )
def test_query_new_column(self):
    """
    Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
    @expected_result The new column is not present in the view.
    """
    session = self.prepare(user_table=True)

    # A view that selects only specific columns (not SELECT *).
    session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
                     "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )

    # Adding a base-table column must NOT change this view's columns.
    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
    assert len(results) == 1
    assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )
def test_rename_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
    @expected_result The column is also renamed in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Rename a primary-key column on the base table; the view must follow.
    session.execute("ALTER TABLE users RENAME username TO user")

    results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
    assert len(results) == 1
    assert hasattr(results[0], 'user'), 'Column "user" not found'
    assert_one(
        session,
        "SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f']
    )
def test_rename_column_atomicity(self):
    """
    Test that column renaming is atomically done between a table and its materialized views
    @jira_ticket CASSANDRA-12952
    """
    session = self.prepare(nodes=1, user_table=True, install_byteman=True)
    node = self.cluster.nodelist()[0]

    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Rename a column with an injected byteman rule to kill the node after the first schema update
    self.fixture_dtest_setup.allow_log_errors = True
    # Byteman script differs between 3.x and 4.x code paths.
    script_version = '4x' if self.cluster.version() >= '4' else '3x'
    node.byteman_submit([mk_bman_path('merge_schema_failure_{}.btm'.format(script_version))])
    # The node dies mid-ALTER, so the driver loses its only host.
    with pytest.raises(NoHostAvailable):
        session.execute("ALTER TABLE users RENAME username TO user")

    logger.debug('Restarting node')
    node.stop()
    node.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)

    # Both the table and its view should have the new schema after restart
    assert_one(
        session,
        "SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
        ['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
    )
    assert_one(
        session,
        "SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )
def test_lwt(self):
    """Test that lightweight transaction behave properly with a materialized view"""
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Inserting initial data using IF NOT EXISTS")
    for i in range(1000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )
    self._replay_batchlogs()

    logger.debug("All rows should have been inserted")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # Second round of IF NOT EXISTS with different v values: every row
    # already exists, so none of these conditional inserts may apply.
    logger.debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
    for i in range(1000):
        v = i * 2
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("No rows should have changed")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # Conditional UPDATE: only rows with v < 10 (the first 10) satisfy the
    # IF clause, so exactly those move to v = id + 2000.
    logger.debug("Update the 10 first rows with a different value")
    for i in range(1000):
        v = i + 2000
        session.execute(
            "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows changed.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 1000
    for i in range(1000):
        v = i + 2000 if i < 10 else i
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(v),
            [v, i, 'a', 3.0]
        )

    # Conditional DELETE: only the 10 updated rows have v = id + 2000, so
    # only those deletes apply.
    logger.debug("Deleting the first 10 rows")
    for i in range(1000):
        v = i + 2000
        session.execute(
            "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows have been deleted.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 990
    for i in range(10, 1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
def test_interrupt_build_process(self):
    """
    Test that an interrupted MV build process is resumed as it should.

    Byteman rules prevent the build from finalizing, the cluster is stopped
    mid-build, and after a restart the logs must show the build being
    resumed (not restarted) and the view must eventually contain all rows.
    """
    options = {'hinted_handoff_enabled': False}
    if self.cluster.version() >= '4':
        options['concurrent_materialized_view_builders'] = 4
    session = self.prepare(options=options, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()
    logger.debug("Avoid premature MV build finalization with byteman")
    for node in self.cluster.nodelist():
        # The byteman scripts differ between 4.0+ and pre-4.0 because the
        # view build finalization code paths were reorganized in 4.0.
        if self.cluster.version() >= '4':
            node.byteman_submit([mk_bman_path('4.0/skip_view_build_finalization.btm')])
            node.byteman_submit([mk_bman_path('4.0/skip_view_build_task_finalization.btm')])
        else:
            node.byteman_submit([mk_bman_path('pre4.0/skip_finish_view_build_status.btm')])
            node.byteman_submit([mk_bman_path('pre4.0/skip_view_build_update_distributed.btm')])
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    logger.debug("Inserting initial data")
    for i in range(10000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )
    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
    logger.debug("Stop the cluster. Interrupt the MV build process.")
    self.cluster.stop()
    logger.debug("Checking logs to verify that the view build tasks have been created")
    for node in self.cluster.nodelist():
        assert node.grep_log('Starting new view build', filename='debug.log')
        assert not node.grep_log('Resuming view build', filename='debug.log')
        node.mark_log(filename='debug.log')
    logger.debug("Restart the cluster")
    self.cluster.start()
    session = self.patient_cql_connection(node1)
    session.execute("USE ks")
    logger.debug("MV shouldn't be built yet.")
    # Bug fix: a COUNT(*) query returns exactly one row, so the previous
    # check `len(list(...)) != 10000` was always true (len is 1) and could
    # never catch a prematurely-finished build. Compare the count itself.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 10000
    logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
    self._wait_for_view("ks", "t_by_v")
    logger.debug("Verify all data")
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
    for i in range(10000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )
    logger.debug("Checking logs to verify that some view build tasks have been resumed")
    for node in self.cluster.nodelist():
        assert node.grep_log('Resuming view build', filename='debug.log')
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('4.0')
def test_drop_while_building(self):
    """
    Test that a parallel MV build is interrupted when the view is removed.

    The build is slowed down with byteman, the view is dropped mid-build,
    and the logs must show the builder stopping before finalization; the
    view must then be re-creatable and build to completion.
    """
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
    # Inject a sleep into every view build task so the build is guaranteed
    # to still be running when the DROP is issued below.
    logger.debug("Slowing down MV build with byteman")
    for node in self.cluster.nodelist():
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])
    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")
    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in self.cluster.nodelist():
        self.check_logs_for_errors()
        # 'Marking view' is only logged when the build reaches finalization.
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        failed = True
    # Bug fix: this file is a pytest suite (see the identical check in
    # test_drop_with_stopped_build); self.assertTrue is unittest API and is
    # not available here. Use a plain assert instead.
    assert failed, "The view shouldn't be queryable"
    self._assert_view_meta(session, views=1, exists=False)
    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_drop_with_stopped_build(self):
    """
    Test that MV whose build has been stopped with `nodetool stop` can be dropped.

    The build is slowed down with byteman, stopped via `nodetool stop
    VIEW_BUILD`, and then the view must be droppable and re-creatable
    without errors in the node logs.
    """
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()
    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
    # Inject a sleep into each view build task so the build is still
    # running when we stop it with nodetool below.
    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])
    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')
    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        self.check_logs_for_errors()
    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")
    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in nodes:
        self.check_logs_for_errors()
        # 'Marking view' is only logged when the build reaches finalization.
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        failed = True
    assert failed, "The view shouldn't be queryable"
    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_resume_stopped_build(self):
    """
    Test that MV builds stopped with `nodetool stop` are resumed after restart.

    The build is slowed with byteman, stopped with `nodetool stop
    VIEW_BUILD`, and after a full cluster restart the logs must show the
    build resuming and completing with all rows present in the view.
    """
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()
    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
    # Inject a sleep into each view build task so the build is still
    # running when we stop it with nodetool below.
    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])
    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')
    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
        # 'Marking view' would mean the build reached finalization.
        assert not node.grep_log('Marking view', filename='debug.log')
        self.check_logs_for_errors()
    logger.debug("Check that MV shouldn't be built yet.")
    # Bug fix: a COUNT(*) query returns exactly one row, so the previous
    # check `len(list(...)) != 5000` was always true (len is 1) and could
    # never catch a finished build. Compare the count value itself.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 5000
    logger.debug("Restart the cluster")
    self.cluster.stop()
    marks = [node.mark_log() for node in nodes]
    self.cluster.start()
    session = self.patient_cql_connection(nodes[0])
    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])
    logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
    for node, mark in zip(nodes, marks):
        assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
        assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
        self.check_logs_for_errors()
@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
    """Run the default_time_to_live MV scenario with memtable flushing enabled."""
    self._test_mv_with_default_ttl(flush=True)
@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
    """Run the default_time_to_live MV scenario without memtable flushing."""
    self._test_mv_with_default_ttl(flush=False)
def _test_mv_with_default_ttl(self, flush: bool):
    """
    Verify mv with default_time_to_live can be deleted properly using expired livenessInfo
    @jira_ticket CASSANDRA-14071

    :param flush: when True, memtables are flushed (and later compacted)
                  between updates so the scenario is also exercised across
                  sstables, not just in memtables.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    # Case 1: the view shares the base's key columns and the base has an
    # unselected column (c is not selected by mv2).
    logger.debug("MV with same key and unselected columns")
    session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Setting only the unselected column still creates the view row.
    self.update_view(session, "UPDATE t2 SET c=1 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 1])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
    # Nulling the only live column removes both base and view rows.
    self.update_view(session, "UPDATE t2 SET c=null WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    self.update_view(session, "UPDATE t2 SET c=2 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 2])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
    # DELETE of the unselected column must behave like setting it to null.
    self.update_view(session, "DELETE c FROM t2 WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    if flush:
        # Rows must stay deleted after compaction as well.
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")
    # test with user-provided ttl
    self.update_view(session, "INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5", flush)
    self.update_view(session, "UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "DELETE c FROM t2 WHERE k=2 AND a=2;", flush)
    # Wait out the 5-second TTL on the row liveness; c was deleted above,
    # so nothing should survive.
    time.sleep(5)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")
    if flush:
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")
    # Case 2: the view has an extra key column (a) beyond the base key.
    logger.debug("MV with extra key")
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Each change to `a` moves the row to a new view primary key; the old
    # view row must be shadowed so only the latest value is visible.
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1])
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 2, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 2, 1])
    assert_one(session, "SELECT * FROM mv", [1, 2, 1])
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 3, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 3, 1])
    assert_one(session, "SELECT * FROM mv", [1, 3, 1])
    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 3, 1])
        assert_one(session, "SELECT * FROM mv", [1, 3, 1])
    # user provided ttl
    self.update_view(session, "UPDATE t USING TTL 50 SET a = 4 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 4, 1])
    assert_one(session, "SELECT * FROM mv", [1, 4, 1])
    self.update_view(session, "UPDATE t USING TTL 40 SET a = 5 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 5, 1])
    assert_one(session, "SELECT * FROM mv", [1, 5, 1])
    self.update_view(session, "UPDATE t USING TTL 30 SET a = 6 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 6, 1])
    assert_one(session, "SELECT * FROM mv", [1, 6, 1])
    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 6, 1])
        assert_one(session, "SELECT * FROM mv", [1, 6, 1])
@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
    """Run the no-base-column-in-view-PK timestamp scenario with flushing enabled."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
    """Run the no-base-column-in-view-PK timestamp scenario without flushing."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_no_base_column_in_view_pk_complex_timestamp(self, flush: bool):
    """
    Able to shadow old view row if all columns in base are removed including unselected
    Able to recreate view row if at least one selected column alive
    @jira_ticket CASSANDRA-11500

    The view mv selects only (k, c, a, b); e and f are "unselected" base
    columns whose liveness still keeps the view row alive.

    :param flush: when True, flush memtables between updates so timestamp
                  resolution is also exercised across sstables.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
                     "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # update unselected, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, add selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
    # remove selected column, view row is removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # update unselected with ts=3, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # insert livenesssInfo, view row should be alive
    self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, view row should be alive because of base livenessInfo alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # add selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # update unselected, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # delete with ts=3, view row should be alive due to unselected@ts4
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    # remove unselected, view row should be removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # add selected with ts=7, view row is alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
    # remove selected with ts=7, view row is dead
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
    # add selected with ts=5, view row is alive (selected column should not affects each other)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    # add selected with ttl=20 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)
    self.update_view(session, "UPDATE t USING TTL 20 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
    time.sleep(20)
    # update unselected with ttl=20, view row should be alive
    self.update_view(session, "UPDATE t USING TTL 20 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
    time.sleep(20)
    # after the TTL on f expires, nothing keeps the row alive: both base
    # and view rows are gone
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
    """Run the base-column-in-view-PK timestamp scenario with flushing enabled."""
    self._test_base_column_in_view_pk_complex_timestamp(flush=True)
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
    """Run the base-column-in-view-PK timestamp scenario without flushing."""
    self._test_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_base_column_in_view_pk_complex_timestamp(self, flush: bool):
    """
    Able to shadow old view row with column ts greater than pk's ts and re-insert the view row
    Able to shadow old view row with column ts smaller than pk's ts and re-insert the view row
    @jira_ticket CASSANDRA-11500

    The base regular column `a` is part of the view's primary key, so every
    change of `a` shadows one view row and creates another; the test then
    verifies read repair of those shadowed rows across replicas.

    :param flush: when True, flush memtables between updates so timestamp
                  resolution is also exercised across sstables.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Set initial values TS=1
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1])
    # increase b ts to 10
    self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
    # switch entries. shadow a = 2, insert a = 1
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
    # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
    # set row TS = 20, a@6, b@20
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
    assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
    self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
    # switch entries. shadow a = 1, insert a = 2
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
    # switch entries. shadow a = 2, insert a = 1
    self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
    assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
    # create another view row
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
    assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
    assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
    # stop node2, node3: the following writes land only on node1 so the
    # later CL=ALL reads trigger digest mismatches / read repair.
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    logger.debug('Shutdown node3')
    node3.stop(wait_other_notice=True)
    # shadow a = 1, create a = 2
    query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
    self.update_view(session, query, flush)
    # shadow (a=2, k=2) after 3 second
    query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
    self.update_view(session, query, flush)
    logger.debug('Starting node2')
    node2.start(wait_for_binary_proto=True)
    logger.debug('Starting node3')
    node3.start(wait_for_binary_proto=True)
    # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
    query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    assert 0 == len(result.current_rows)
    # For k = 1 & a = 1, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
    assert 0 == len(result.current_rows)
    # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
    query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    assert 1 == len(result.current_rows)
    # For k = 1 & a = 2, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert 1 == len(result.current_rows)
    assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
    # Let the TTL-3 update on (k=2) expire before the next read.
    time.sleep(3)
    # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired
    query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    logger.debug(result.current_rows)
    assert 0 == len(result.current_rows)
    # For k = 2 & a = 2, second time no digest mismatch
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    assert 0 == len(result.current_rows)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
    """Run the expired-liveness-with-limit scenario on a single-node, RF=1 cluster."""
    self._test_expired_liveness_with_limit(rf=1, nodes=1)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
    """Run the expired-liveness-with-limit scenario on three nodes with RF=1."""
    self._test_expired_liveness_with_limit(rf=1, nodes=3)
@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
    """Run the expired-liveness-with-limit scenario on three nodes with RF=3."""
    self._test_expired_liveness_with_limit(rf=3, nodes=3)
def _test_expired_liveness_with_limit(self, rf: int, nodes: int):
    """
    Test MV with expired liveness limit is properly handled
    @jira_ticket CASSANDRA-13883

    Deleting the view-PK base column `a` leaves view rows with expired
    liveness; queries with LIMIT, IN lists, and a small fetch size must
    all skip those rows and return only the two live ones (k=50, k=99).

    :param rf:    replication factor for the keyspace.
    :param nodes: number of nodes in the cluster.
    """
    session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1 = self.cluster.nodelist()[0]
    session.execute('USE ks')
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    for k in range(100):
        session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
    # generate view row with expired liveness except for row 50 and 99
    for k in range(100):
        if k == 50 or k == 99:
            continue
        session.execute("DELETE a FROM t where k = {};".format(k))
    # there should be 2 live data
    assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
    # verify IN
    keys = range(100)
    assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
               [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
               [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
               [[50, 50, 50], [99, 99, 99]])
    # verify fetch size: with one row per page the expired rows must still
    # be skipped correctly across page boundaries
    session.default_fetch_size = 1
    assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
    assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
    assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
    """Run the commutative-tombstone scenario with memtable flushing enabled."""
    self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
    """Run the commutative-tombstone scenario without memtable flushing."""
    self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)
def _test_base_column_in_view_pk_commutative_tombstone_(self, flush: bool):
    """
    view row deletion should be commutative with newer view livenessInfo, otherwise deleted columns may be resurrected.
    @jira_ticket CASSANDRA-13409

    :param flush: when True, each update is flushed to its own sstable so
                  the commutativity is exercised at compaction time too.
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1 = self.cluster.nodelist()[0]
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Keep each flushed update in its own sstable until we compact
    # explicitly at the end.
    for node in self.cluster.nodelist():
        node.nodetool("disableautocompaction")
    # sstable 1, Set initial values TS=1
    self.update_view(session, "INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, 'a', 3.0])
    # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;", flush)
    assert_none(session, "SELECT * FROM t_by_v")
    assert_none(session, "SELECT * FROM t")
    # sstable 3, tombstones of mv created by base deletion should remain.
    self.update_view(session, "INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 1, None, None])
    # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;", flush)
    assert_one(session, "SELECT * FROM t_by_v", [2, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 2, None, None])
    # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;", flush)
    assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
    assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
    if flush:
        # Merging all five sstables must not change the outcome.
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
        assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
    # shadow view row (id=1, v=1)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;", flush)
    assert_none(session, "SELECT * FROM t_by_v")
    assert_one(session, "SELECT * FROM t", [1, None, None, None])
def test_view_tombstone(self):
    """
    Test that a materialized views properly tombstone
    @jira_ticket CASSANDRA-10261
    @jira_ticket CASSANDRA-10910

    A base update moves the row to a new view partition, tombstoning the
    old one; with one replica down for the next update, a CL=ALL read must
    repair the stale replica (seen as exactly one digest mismatch).
    """
    self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.max_trace_wait = 120
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    # Set initial values TS=0, verify
    session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'a', 3.0]
    )
    session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )
    # change v's value and TS=3, tombstones v=1 and adds v=0 record
    session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                    consistency_level=ConsistencyLevel.ALL))
    self._replay_batchlogs()
    assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
    # With node2 down, the following v=1 re-insert only reaches a quorum,
    # leaving node2 with the stale v=0 state for the CL=ALL read below.
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
                                    consistency_level=ConsistencyLevel.QUORUM))
    self._replay_batchlogs()
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0]
    )
    node2.start(wait_for_binary_proto=True)
    # We should get a digest mismatch
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                            consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), True)
    # We should not get a digest mismatch the second time
    query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
    result = session.execute(query, trace=True)
    self.check_trace_events(result.get_query_trace(), False)
    # Verify values one last time
    assert_one(
        session,
        "SELECT * FROM t_by_v WHERE v = 1",
        [1, 1, 'b', 3.0],
        cl=ConsistencyLevel.ALL
    )
def check_trace_events(self, trace, expect_digest):
    """Scan a query trace for a digest-mismatch event and fail the test when
    the outcome does not match *expect_digest*.

    Trace descriptions differ by version:
      4.0+  "Digest mismatch: Mismatch for key DecoratedKey"
      <4.0  "Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey"
    """
    pattern = r"Digest mismatch: ([a-zA-Z.]+:\s)?Mismatch for key DecoratedKey"
    mismatch_seen = False
    for event in trace.events:
        if re.match(pattern, event.description):
            mismatch_seen = True
            break
    if mismatch_seen and not expect_digest:
        pytest.fail("Encountered digest mismatch when we shouldn't")
    if expect_digest and not mismatch_seen:
        pytest.fail("Didn't find digest mismatch")
def test_simple_repair_by_base(self):
    """Run the simple-repair scenario, repairing only the base table."""
    self._simple_repair_test(repair_base=True)
def test_simple_repair_by_view(self):
    """Run the simple-repair scenario, repairing only the materialized view."""
    self._simple_repair_test(repair_view=True)
def _simple_repair_test(self, repair_base=False, repair_view=False):
    """
    Verify that a materialized view is consistent after a simple repair.

    Writes 1000 rows while node2 is down, restarts node2, repairs either
    the base table (repair_base=True) or the view (repair_view=True), and
    checks that CL=ALL reads then succeed without a digest mismatch.
    """
    session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
    node1, node2, node3 = self.cluster.nodelist()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2')
    node2.stop(wait_other_notice=True)
    for i in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV with CL=ONE')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
    logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
    for i in range(1000):
        statement = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        assert_unavailable(
            session.execute,
            statement
        )
    logger.debug('Start node2, and repair')
    node2.start(wait_for_binary_proto=True)
    if repair_base:
        node1.nodetool("repair ks t")
    if repair_view:
        node1.nodetool("repair ks t_by_v")
    logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
    for i in range(1000):
        query = SimpleStatement(
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            consistency_level=ConsistencyLevel.ALL
        )
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        # Fixed: previously `assert X, [[i, i, 'a' == 3.0]]` — the comma made
        # the list an assert *message* and `'a' == 3.0` a stray comparison,
        # so only truthiness of the result was checked. Now a real equality.
        assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]]
def test_base_replica_repair(self):
    """Run the base-replica-repair scenario without injected MV lock failures."""
    self._base_replica_repair_test()
def test_base_replica_repair_with_contention(self):
    """
    Test repair does not fail when there is MV lock contention
    @jira_ticket CASSANDRA-12905
    """
    self._base_replica_repair_test(fail_mv_lock=True)
def _base_replica_repair_test(self, fail_mv_lock=False):
    """
    Test that a materialized view is consistent after the repair of the base replica.

    Wipes node1's data, restarts it (optionally injecting MV lock-acquisition
    failures, see CASSANDRA-12905), and verifies that repairing only the base
    table rebuilds the view on node1.
    """
    self.prepare(rf=3)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Write initial data')
    for i in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV with CL=ALL')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )
    logger.debug('Shutdown node1')
    node1.stop(wait_other_notice=True)
    logger.debug('Delete node1 data')
    node1.clear(clear_all=True)
    jvm_args = []
    if fail_mv_lock:
        # 3.10+ requires explicit replace flags to restart a wiped node (CASSANDRA-10134)
        if self.cluster.version() >= LooseVersion('3.10'):  # CASSANDRA-10134
            jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
        jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
        # this should not make Keyspace.apply throw WTE on failure to acquire lock
        node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
    logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
    node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    logger.debug('Verify that there is no data on node1')
    for i in range(1000):
        assert_none(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i)
        )
    logger.debug('Restarting node2 and node3')
    node2.start(wait_for_binary_proto=True)
    node3.start(wait_for_binary_proto=True)
    # Just repair the base replica
    logger.debug('Starting repair on node1')
    node1.nodetool("repair ks t")
    logger.debug('Verify data with cl=ALL')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
@pytest.mark.resource_intensive
def test_complex_repair(self):
    """
    Test that a materialized view is consistent after a more complex repair.

    Writes conflicting data to the {node1,node4,node5} and {node2,node3}
    halves of the cluster, lets the batchlogs expire, then verifies that a
    global repair reconciles the view to the newer writes.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
    session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                    "WITH gc_grace_seconds = 5")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2 and node3')
    node2.stop()
    node3.stop(wait_other_notice=True)
    logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
    for i in range(1000):
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
    logger.debug('Close connection to node1')
    session.cluster.shutdown()
    logger.debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()
    logger.debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_for_binary_proto=True)
    session2 = self.patient_cql_connection(node2)
    logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    for i in range(1000):
        assert_none(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
        )
    logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
    for i in range(1000):
        # we write i*2 as value, instead of i
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
    logger.debug('Verify the new data in the MV on node2 with CL=ONE')
    for i in range(1000):
        v = i * 2
        assert_one(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0]
        )
    logger.debug('Wait for batchlogs to expire from node2 and node3')
    time.sleep(5)
    logger.debug('Start remaining nodes')
    node1.start(wait_for_binary_proto=True)
    node4.start(wait_for_binary_proto=True)
    node5.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node1)
    logger.debug('Read data from MV at QUORUM (old data should be returned)')
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )
    logger.debug('Run global repair on node1')
    node1.repair()
    logger.debug('Read data from MV at quorum (new data should be returned after repair)')
    for i in range(1000):
        v = i * 2
        assert_one(
            session,
            "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
            [v, v, 'a', 3.0],
            cl=ConsistencyLevel.QUORUM
        )
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
    """
    @jira_ticket: CASSANDRA-13299, test break up large partition when repairing base with mv.
    Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle
    the number of rows to be applied in one mutation
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    for node in self.cluster.nodelist():
        node.nodetool("disableautocompaction")
    session.execute("CREATE TABLE ks.t (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY(pk, ck1, ck2))")
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE pk IS NOT NULL AND ck1 IS NOT NULL AND ck2 IS NOT NULL "
                     "PRIMARY KEY (pk, ck2, ck1)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)
    size = 50
    range_deletion_ts = 30
    partition_deletion_ts = 10
    # Populate one large partition (pk=1); the write TIMESTAMP equals ck1 so
    # the later deletions at ts 10/30/50/60/70 shadow predictable subsets.
    for ck1 in range(size):
        for ck2 in range(size):
            session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                            " VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
    self._replay_batchlogs()
    for ck1 in range(size):
        for ck2 in range(size):
            assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                       [1, ck1, ck2, ck1, ck2])
            assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                       [1, ck1, ck2, ck1, ck2])
    logger.debug('Shutdown node4 and node5')
    node4.stop(wait_other_notice=True)
    node5.stop(wait_other_notice=True)
    # Issue a mix of tombstone kinds that only node1 sees.
    for ck1 in range(size):
        for ck2 in range(size):
            if ck1 % 2 == 0:  # range tombstone
                session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
            elif ck1 == ck2:  # row tombstone
                session.execute("DELETE FROM ks.t USING TIMESTAMP 60 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
            elif ck1 == ck2 - 1:  # cell tombstone
                session.execute("DELETE v2 FROM ks.t USING TIMESTAMP 70 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
    # range deletion
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 < 30 and ck1 > 20".format(range_deletion_ts))
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 = 20 and ck2 < 10".format(range_deletion_ts))
    # partition deletion for ck1 <= partition_deletion_ts
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1".format(partition_deletion_ts))
    # only partition deletion for the pk=2000
    session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=2000".format(partition_deletion_ts))
    self._replay_batchlogs()
    # start nodes with different batch size
    logger.debug('Starting nodes')
    node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
    node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
    node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
    self._replay_batchlogs()
    logger.debug('repairing base table')
    node1.nodetool("repair ks t")
    # insert data to the deleted partition with pk=2000, they should be considered dead
    session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                    " VALUES (2000, 0, 0, 0, 0) USING TIMESTAMP {}".format(partition_deletion_ts - 1))
    self._replay_batchlogs()
    logger.debug('stop cluster')
    self.cluster.stop()
    logger.debug('rolling restart to check repaired data on each node')
    for node in self.cluster.nodelist():
        logger.debug('starting {}'.format(node.name))
        node.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
        for ck1 in range(size):
            for ck2 in range(size):
                if (
                    ck1 <= partition_deletion_ts or  # partition deletion
                    ck1 == ck2 or ck1 % 2 == 0 or  # row deletion or range tombstone
                    (ck1 > 20 and ck1 < 30) or (ck1 == 20 and ck2 < 10)  # range tombstone
                ):
                    assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                         "ck1={} AND ck2={}".format(ck1, ck2))
                    assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                         "ck1={} AND ck2={}".format(ck1, ck2))
                elif ck1 == ck2 - 1:  # cell tombstone
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                else:
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
                    assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                        "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
        # Verify partition deletion with pk=2000 has no live data
        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=2000")
        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=2000")
        logger.debug('stopping {}'.format(node.name))
        node.stop(wait_other_notice=True, wait_for_binary_proto=True)
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
    """
    Test that a materialized view is consistent after a more complex repair.
    """
    session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
    node1, node2, node3, node4, node5 = self.cluster.nodelist()
    # we create the base table with gc_grace_seconds=1 so batchlog will expire quickly
    session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                    "WITH gc_grace_seconds = 1")
    # NOTE(review): "v IS NOT NULL" appears twice in this view definition —
    # harmless, but confirm whether one occurrence was meant to be removed.
    session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                     "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Shutdown node2 and node3')
    node2.stop(wait_other_notice=True)
    node3.stop(wait_other_notice=True)
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
    session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
    self._replay_batchlogs()
    logger.debug('Verify the data in the MV on node1 with CL=ONE')
    assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
    session.shutdown()
    logger.debug('Shutdown node1, node4 and node5')
    node1.stop()
    node4.stop()
    node5.stop()
    logger.debug('Start nodes 2 and 3')
    node2.start()
    node3.start(wait_for_binary_proto=True)
    session2 = self.patient_cql_connection(node2)
    session2.execute('USE ks')
    logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
    logger.debug('Write new data in node2 that overlap those in node1')
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
    self._replay_batchlogs()
    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
    session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
    self._replay_batchlogs()
    assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
    logger.debug("Composite delete of everything")
    session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
    session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
    self._replay_batchlogs()
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
    logger.debug('Wait for batchlogs to expire from node2 and node3')
    time.sleep(5)
    logger.debug('Start remaining nodes')
    node1.start(wait_for_binary_proto=True)
    node4.start(wait_for_binary_proto=True)
    node5.start(wait_for_binary_proto=True)
    # at this point the data isn't repaired so we have an inconsistency.
    # this value should return None
    assert_all(
        session2,
        "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
        cl=ConsistencyLevel.QUORUM
    )
    logger.debug('Run global repair on node1')
    node1.repair()
    assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def test_complex_mv_select_statements(self):
    """
    Test complex MV select statements
    @jira_ticket CASSANDRA-9664

    For each legal MV primary-key arrangement, creates a filtered view
    (a = 1 AND c = 1) and checks that inserts, updates and deletes on the
    base table are reflected in, or excluded from, the view as expected.
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
    logger.debug("Creating keyspace")
    session.execute("CREATE KEYSPACE mvtest WITH replication = "
                    "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
    session.execute('USE mvtest')
    mv_primary_keys = ["((a, b), c)",
                       "((b, a), c)",
                       "(a, b, c)",
                       "(c, b, a)",
                       "((c, a), b)"]
    # Table and view are created and dropped on every iteration.
    for mv_primary_key in mv_primary_keys:
        session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
        insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
        update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
        delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
        delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
        session.cluster.control_connection.wait_for_schema_agreement()
        rows = [(0, 0, 0, 0),
                (0, 0, 1, 0),
                (0, 1, 0, 0),
                (0, 1, 1, 0),
                (1, 0, 0, 0),
                (1, 0, 1, 0),
                (1, 1, -1, 0),
                (1, 1, 0, 0),
                (1, 1, 1, 0)]
        for row in rows:
            session.execute(insert_stmt, row)
        logger.debug("Testing MV primary key: {}".format(mv_primary_key))
        session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                        "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
        time.sleep(3)
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # insert new rows that do not match the filter
        session.execute(insert_stmt, (0, 0, 1, 0))
        session.execute(insert_stmt, (1, 1, 0, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # insert new row that does match the filter
        session.execute(insert_stmt, (1, 2, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # update rows that do not match the filter
        session.execute(update_stmt, (1, 1, -1, 0))
        session.execute(update_stmt, (0, 1, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # update a row that does match the filter
        session.execute(update_stmt, (2, 1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # delete rows that do not match the filter
        session.execute(delete_stmt1, (1, 1, -1))
        session.execute(delete_stmt1, (2, 0, 1))
        session.execute(delete_stmt2, (0,))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # delete a row that does match the filter
        session.execute(delete_stmt1, (1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )
        # delete a partition that matches the filter
        session.execute(delete_stmt2, (1,))
        assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
        # Cleanup
        session.execute("DROP MATERIALIZED VIEW mv")
        session.execute("DROP TABLE test")
def propagate_view_creation_over_non_existing_table(self):
    """
    The internal addition of a view over a non existing table should be ignored
    @jira_ticket CASSANDRA-13737
    """
    # NOTE(review): this method name lacks the "test_" prefix, so pytest will
    # not collect it as a test — confirm whether that is intentional.
    cluster = self.cluster
    cluster.populate(3)
    cluster.start()
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
    create_ks(session, 'ks', 3)
    session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')
    # create a materialized view only in nodes 1 and 2
    node3.stop(wait_other_notice=True)
    session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
                     'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
                     'PRIMARY KEY (state, username)'))
    # drop the base table only in node 3
    node1.stop(wait_other_notice=True)
    node2.stop(wait_other_notice=True)
    node3.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
    session.execute('DROP TABLE ks.users')
    # restart the cluster
    cluster.stop()
    cluster.start()
    # node3 should have received and ignored the creation of the MV over the dropped table
    assert node3.grep_log('Not adding view users_by_state because the base table')
def test_base_view_consistency_on_failure_after_mv_apply(self):
    """Crash scenario: the base write fails after the view mutation is applied."""
    self._test_base_view_consistency_on_crash("after")
def test_base_view_consistency_on_failure_before_mv_apply(self):
    """Crash scenario: the base write fails before the view mutation is applied."""
    self._test_base_view_consistency_on_crash("before")
def _test_base_view_consistency_on_crash(self, fail_phase):
    """
    * Fails base table write before or after applying views
    * Restart node and replay commit and batchlog
    * Check that base and views are present
    @jira_ticket CASSANDRA-13069
    """
    self.cluster.set_batch_commitlog(enabled=True)
    self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
    self.prepare(rf=1, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()
    logger.debug('Make node1 fail {} view writes'.format(fail_phase))
    # Byteman rule injects a "Dummy failure" either before or after MV apply.
    node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])
    logger.debug('Write 1000 rows - all node1 writes should fail')
    failed = False
    for i in range(1, 1000):
        try:
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
        except WriteFailure:
            failed = True
    assert failed, "Should fail at least once."
    assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"
    missing_entries = 0
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        if not base_entry:
            missing_entries += 1
        if not view_entry:
            missing_entries += 1
    logger.debug("Missing entries {}".format(missing_entries))
    assert missing_entries > 0
    logger.debug('Restarting node1 to ensure commit log is replayed')
    node1.stop(wait_other_notice=True)
    # Set batchlog replay_timeout_in_ms=1 so we can ensure batchlog will be replayed below
    node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])
    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()
    logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
        assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
# For read verification
class MutationPresence(Enum):
    """Classification of a row when comparing the base table against the view."""
    match = 1     # base and view agree for this row
    extra = 2     # view returned a row the base does not justify
    missing = 3   # base has the row but the view does not
    excluded = 4  # absent from both, as expected
    unknown = 5   # any other combination
class MM(object):
    """Base type for mutation-match results.

    Subclasses assign ``mp`` (a MutationPresence member) in their
    constructors and may override ``out`` to provide a diagnostic message.
    """

    # Classification tag; None until a subclass constructor sets it.
    mp = None

    def out(self):
        """Return a human-readable message, or None when there is nothing to report."""
        return None
class Match(MM):
    """Result for a row where base table and view agree."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        # A match produces no diagnostic message.
        return None
class Extra(MM):
    """Result for a view row that disagrees with (or is unjustified by) the base."""

    expecting = None
    value = None
    row = None

    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row

    def out(self):
        """Describe the discrepancy for logging."""
        template = "Extra. Expected {} instead of {}; row: {}"
        return template.format(self.expecting, self.value, self.row)
class Missing(MM):
    """Result for a base row that is absent from the view."""

    value = None
    row = None

    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row

    def out(self):
        """Describe where the row went missing."""
        template = "Missing. At {}"
        return template.format(self.row)
class Excluded(MM):
    """Result for a row correctly absent from both base and view."""

    def __init__(self):
        self.mp = MutationPresence.excluded

    def out(self):
        # Expected absence produces no diagnostic message.
        return None
class Unknown(MM):
    """Result for any base/view combination not covered by the other cases."""

    def __init__(self):
        self.mp = MutationPresence.unknown

    def out(self):
        # No specific message; counted under the "WTF" bucket in read status.
        return None
# Consistency levels used by the reader threads and async writers below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
# Mirrors the mvtest.test1 schema: PRIMARY KEY (a, b) with payload columns c, d.
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
    """Map write index *i* deterministically onto a SimpleRow.

    The partition key cycles over num_partitions; b buckets the index so
    that (a, b) repeats, letting later writes overwrite earlier ones.
    """
    partition_key = i % num_partitions
    bucket = (i % 400) // num_partitions
    return SimpleRow(a=partition_key, b=bucket, c=i, d=i)
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
def execute_query(session, select_gi, i):
row = row_generate(i, num_partitions)
if (row.a, row.b) in rows:
base = rows[(row.a, row.b)]
else:
base = -1
gi = list(session.execute(select_gi, [row.c, row.a]))
if base == i and len(gi) == 1:
return Match()
elif base != i and len(gi) == 1:
return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
elif base == i and len(gi) == 0:
return Missing(base, i)
elif base != i and len(gi) == 0:
return Excluded()
else:
return Unknown()
try:
cluster = Cluster([ip])
session = cluster.connect()
select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
select_gi.consistency_level = readConsistency
for i in range(start, end):
ret = execute_query(session, select_gi, i)
queue.put_nowait(ret)
except Exception as e:
print(str(e))
queue.close()
@since('3.0')
@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
    """Concurrent write/read consistency checks for materialized views
    (@jira_ticket CASSANDRA-10981)."""

    def prepare(self, user_table=False):
        """Start a 3-node cluster with MVs enabled, reset the per-test
        counters, and return a session connected to node2."""
        cluster = self.cluster
        cluster.set_configuration_options({'enable_materialized_views': 'true'})
        cluster.populate(3).start()
        node2 = cluster.nodelist()[1]
        # Keep the status of async requests
        self.exception_type = collections.Counter()
        self.num_request_done = 0
        self.counts = {}
        for mp in MutationPresence:
            self.counts[mp] = 0
        self.rows = {}
        self.update_stats_every = 100
        logger.debug("Set to talk to node 2")
        self.session = self.patient_cql_connection(node2)
        return self.session

    def _print_write_status(self, row):
        """Log write progress: current row index plus a tally of async write errors."""
        output = "\r{}".format(row)
        for key in list(self.exception_type.keys()):
            output = "{} ({}: {})".format(output, key, self.exception_type[key])
        logger.debug(output)

    def _print_read_status(self, row):
        """Log read-verification progress broken down by MutationPresence kind."""
        if self.counts[MutationPresence.unknown] == 0:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing])
            )
        else:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing],
                    # Fixed: was `MutationPresence.unkown` (typo), which raised
                    # AttributeError whenever an unknown result had been counted.
                    self.counts[MutationPresence.unknown])
            )

    def _do_row(self, insert_stmt, i, num_partitions):
        """Fire one async INSERT for row index *i*; callbacks update counters."""
        # Error callback for async requests
        def handle_errors(row, exc):
            self.num_request_done += 1
            try:
                name = type(exc).__name__
                self.exception_type[name] += 1
            except Exception as e:
                print(traceback.format_exception_only(type(e), e))

        # Success callback for async requests
        def success_callback(row):
            self.num_request_done += 1
            if i % self.update_stats_every == 0:
                self._print_write_status(i)

        row = row_generate(i, num_partitions)
        async_ret = self.session.execute_async(insert_stmt, row)
        errors = partial(handle_errors, row)
        async_ret.add_callbacks(success_callback, errors)

    def _populate_rows(self):
        """Snapshot the base table into self.rows keyed by (a, b)."""
        statement = SimpleStatement(
            "SELECT a, b, c FROM mvtest.test1",
            consistency_level=readConsistency
        )
        data = self.session.execute(statement)
        for row in data:
            self.rows[(row.a, row.b)] = row.c

    @pytest.mark.skip(reason='awaiting CASSANDRA-11290')
    def test_single_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to a single partition
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(1)

    def test_multi_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to multiple partitions
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(5)

    def _consistent_reads_after_write_test(self, num_partitions):
        """Write 100k rows asynchronously (the last 90% racing the MV build),
        then verify every index through the view with concurrent readers."""
        session = self.prepare()
        node1, node2, node3 = self.cluster.nodelist()

        # Test config
        lower = 0
        upper = 100000
        processes = 4
        queues = [None] * processes
        eachProcess = (upper - lower) // processes

        logger.debug("Creating schema")
        session.execute(
            ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
             "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        )
        session.execute(
            "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
        insert1.consistency_level = writeConsistency
        logger.debug("Writing data to base table")
        for i in range(upper // 10):
            self._do_row(insert1, i, num_partitions)

        logger.debug("Creating materialized view")
        session.execute(
            ('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
             'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
             'c IS NOT NULL PRIMARY KEY (c,a,b)')
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        logger.debug("Writing more data to base table")
        for i in range(upper // 10, upper):
            self._do_row(insert1, i, num_partitions)

        # Wait that all requests are done
        while self.num_request_done < upper:
            time.sleep(1)

        logger.debug("Making sure all batchlogs are replayed on node1")
        node1.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node2")
        node2.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node3")
        node3.nodetool("replaybatchlog")

        logger.debug("Finished writes, now verifying reads")
        self._populate_rows()

        # Fan out the index range across reader threads, one result queue each.
        threads = []
        for i in range(processes):
            start = lower + (eachProcess * i)
            if i == processes - 1:
                end = upper
            else:
                end = lower + (eachProcess * (i + 1))
            q = Queue()
            node_ip = get_ip_from_node(node2)
            t = threading.Thread(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
            threads.append(t)
            t.daemon = True
            t.start()
            queues[i] = q

        # Drain the queues round-robin; only the per-kind totals matter.
        for i in range(lower, upper):
            if i % 100 == 0:
                self._print_read_status(i)
            try:
                mm = queues[i % processes].get(timeout=60)
            except Empty as e:
                pytest.skip("Failed to get range {range} within timeout from queue. {error}".format(range=i, error=str(e)))
            if mm.out() is not None:
                logger.debug("\r{}\n".format(mm.out()))
            self.counts[mm.mp] += 1

        self._print_read_status(upper)
        for thread in threads:
            thread.join(timeout=300)
@since('3.0')
class TestMaterializedViewsLockcontention(Tester):
    """
    Test materialized views lock contention.
    @jira_ticket CASSANDRA-12689
    @since 3.0
    """
    def _prepare_cluster(self):
        """Start a 1-node cluster with MVs enabled and return a session.

        Concurrency is throttled to 1 writer and the JVM flag
        -Dcassandra.test.fail_mv_locks_count=64 is set — presumably to force
        MV lock acquisition failures/retries (TODO confirm against the
        CASSANDRA-12689 test hook).  Creates keyspace ``locktest`` with table
        ``test`` and materialized view ``test_sorted_mv``.
        """
        self.cluster.populate(1)
        self.cluster.set_configuration_options({'enable_materialized_views': 'true'})
        # NOTE(review): this rebinds the inherited supports_v5_protocol METHOD
        # to a bool on this instance; it works because it is only called once.
        self.supports_v5_protocol = self.supports_v5_protocol(self.cluster.version())
        self.protocol_version = 5 if self.supports_v5_protocol else 4
        # Serialize writes so lock contention is deterministic.
        self.cluster.set_configuration_options(values={
            'concurrent_materialized_view_writes': 1,
            'concurrent_writes': 1,
        })
        self.nodes = list(self.cluster.nodes.values())
        self.cluster.start(jvm_args=[
            "-Dcassandra.test.fail_mv_locks_count=64"
        ])
        session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
        keyspace = "locktest"
        session.execute("""
                CREATE KEYSPACE IF NOT EXISTS {}
                WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '1' }}
            """.format(keyspace))
        session.set_keyspace(keyspace)
        session.execute(
            "CREATE TABLE IF NOT EXISTS test (int1 int, int2 int, date timestamp, PRIMARY KEY (int1, int2))")
        session.execute("""CREATE MATERIALIZED VIEW test_sorted_mv AS
            SELECT int1, date, int2
            FROM test
            WHERE int1 IS NOT NULL AND date IS NOT NULL AND int2 IS NOT NULL
            PRIMARY KEY (int1, date, int2)
            WITH CLUSTERING ORDER BY (date DESC, int2 DESC)""")
        return session
    @since('3.0')
    def test_mutations_dontblock(self):
        """Insert 100x100 rows concurrently and verify no mutation ever ends
        up stuck in the MutationStage pending queue despite MV lock failures."""
        session = self._prepare_cluster()
        records = 100
        records2 = 100
        params = []
        for x in range(records):
            for y in range(records2):
                params.append([x, y])
        execute_concurrent_with_args(
            session,
            session.prepare('INSERT INTO test (int1, int2, date) VALUES (?, ?, toTimestamp(now()))'),
            params
        )
        # Partition int1=1 must contain exactly one row per int2 value.
        assert_one(session, "SELECT count(*) FROM test WHERE int1 = 1", [records2])
        # No mutations may still be pending once all inserts returned.
        for node in self.nodes:
            with JolokiaAgent(node) as jmx:
                mutationStagePending = jmx.read_attribute(
                    make_mbean('metrics', type="ThreadPools", path='request', scope='MutationStage', name='PendingTasks'), "Value"
                )
                assert 0 == mutationStagePending, "Pending mutations: {}".format(mutationStagePending)
|
limiter.py | from unittest import TestCase
import redis
from function_limiter.limiter import Limiter
from function_limiter.limiter import RateLimitExceeded
import time
from multiprocessing import Process
class TestSimpleFiveRequest(TestCase):
def test_five_call(self):
limiter = Limiter()
@limiter.limit('5/minute', 'key')
def func():
pass
i = 0
for i in range(5):
func()
self.assertEqual(4, i)
def test_more_than_limitation_call(self):
limiter = Limiter()
@limiter.limit('5/minute', 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(6):
func()
self.assertEqual(5, i)
def test_five_call_using_per(self):
limiter = Limiter()
@limiter.limit('5 per minute', 'key')
def func():
pass
i = 0
for i in range(5):
func()
self.assertEqual(4, i)
class TestMultipleLimitations(TestCase):
def test_single_line_limitations(self):
limiter = Limiter()
@limiter.limit('1/second;3/minute', 'key')
def func():
pass
i = 0
for i in range(3):
func()
time.sleep(1)
self.assertEqual(2, i)
def test_single_line_limitations_more_than_first_limitation(self):
limiter = Limiter()
@limiter.limit('1/second;3/minute', 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(3):
func()
self.assertEqual(1, i)
def test_single_line_limitations_more_than_second_limitation(self):
limiter = Limiter()
@limiter.limit('1/second;3/minute', 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
time.sleep(1)
self.assertEqual(3, i)
# def test_multiple_line_limitations(self):
# limiter = Limiter()
#
# @limiter.limit('3/minute', 'key')
# @limiter.limit('1/second', 'key')
# def func():
# pass
#
# i = 0
#
# for i in range(3):
# func()
# time.sleep(1)
#
# self.assertEqual(i, 2)
#
# def test_multiple_line_limitations_more_than_first_limitation(self):
# limiter = Limiter()
#
# @limiter.limit('3/minute', 'key')
# @limiter.limit('1/second', 'key')
# def func():
# pass
#
# i = 0
# try:
# for i in range(3):
# func()
# except Exception as e:
# self.assertIsInstance(e, RateLimitExceeded)
# self.assertEqual(i, 2)
#
# def test_multiple_line_limitations_more_than_second_limitation(self):
# limiter = Limiter()
#
# @limiter.limit('3/minute', 'key')
# @limiter.limit('1/second', 'key')
# def func():
# pass
#
# i = 0
# try:
# for i in range(4):
# func()
# time.sleep(1)
#
# except Exception as e:
# self.assertIsInstance(e, RateLimitExceeded)
# self.assertEqual(i, 2)
class TestCallableFunctionForKeys(TestCase):
def setUp(self):
def limitations() -> str:
return '3/minute'
self.limitations = limitations
def key() -> str:
return 'key'
self.key = key
def exempt() -> str:
return 'key'
self.exempt = exempt
def test_callable_function_for_limitations(self):
limiter = Limiter()
@limiter.limit(self.limitations, 'key')
def func():
pass
i = 0
for i in range(3):
func()
self.assertEqual(2, i)
def test_more_than_limitation_call_for_limitations(self):
limiter = Limiter()
@limiter.limit(self.limitations, 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
def test_more_than_limitation_call_for_key(self):
limiter = Limiter()
@limiter.limit('3/minute', self.key)
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
def test_more_than_limitation_call_for_exempt(self):
limiter = Limiter()
@limiter.limit('3/minute', 'key', exempt=self.exempt)
def func():
pass
i = 0
for i in range(4):
func()
self.assertEqual(3, i)
class TestDefaultKeys(TestCase):
def test_default_limitations(self):
limiter = Limiter(
default_limitations='3/minute'
)
@limiter.limit(None, 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
def test_default_limitations_key(self):
limiter = Limiter(
default_key='key'
)
@limiter.limit('3/minute')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
def test_default_exempt_key(self):
limiter = Limiter(
default_exempt='key'
)
@limiter.limit('3/minute', 'key')
def func():
pass
i = 0
for i in range(10):
func()
self.assertEqual(9, i)
class TestExemptKey(TestCase):
def test_exempt_key(self):
limiter = Limiter(
default_key='key'
)
@limiter.limit('3/minute', 'key', exempt='key')
def func():
pass
i = 0
for i in range(10):
func()
self.assertEqual(9, i)
def test_exempt_key_not_equal(self):
limiter = Limiter(
default_key='key'
)
@limiter.limit('3/minute', 'key', exempt='other-key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
class TestRedis(TestCase):
def tearDown(self):
storage = redis.from_url(url='redis://127.0.0.1:6379/', db=0)
storage.delete('function-limiter')
storage.delete('custom_database_name')
def test_redis_for_single_instance(self):
limiter = Limiter(
storage_uri='redis://127.0.0.1:6379/'
)
@limiter.limit('3/minute', 'key')
def func():
pass
i = 0
for i in range(3):
func()
self.assertEqual(2, i)
def test_redis_for_single_instance_over_rate(self):
limiter = Limiter(
storage_uri='redis://127.0.0.1:6379/'
)
@limiter.limit('3/minute', 'over-key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
def test_redis_for_multiple_instance(self):
limiter = Limiter(
storage_uri='redis://127.0.0.1:6379/'
)
@limiter.limit('3/minute', 'multiple-instance-key')
def func():
pass
processes = list()
i = 0
try:
for i in range(4):
processes.append(Process(target=func))
for process in processes:
process.start()
for process in processes:
process.join()
except Exception as e:
self.assertIsInstance(e, RateLimitExceeded)
self.assertEqual(3, i)
def test_redis_custom_database_name(self):
limiter = Limiter(
database_name='custom_database_name',
storage_uri='redis://127.0.0.1:6379/'
)
@limiter.limit('3/minute', 'key-k')
def func():
pass
i = 0
try:
for i in range(3):
func()
except Exception as e:
self.assertIsInstance(e, RateLimitExceeded)
self.assertEqual(2, i)
class TestWrongInput(TestCase):
def test_wrong_limitations_format(self):
limiter = Limiter()
@limiter.limit('wrong input', 'key')
def func():
pass
i = 0
for i in range(10):
func()
self.assertEqual(9, i)
def test_wrong_type_limitations_format(self):
limiter = Limiter()
@limiter.limit(0, 'key')
def func():
pass
i = 0
for i in range(10):
func()
self.assertEqual(9, i)
class TestGarbageCollector(TestCase):
def test_garbage_collector_with_garbage(self):
limiter = Limiter()
@limiter.limit('1/second', 'key')
def func():
pass
i = 0
for i in range(4):
func()
time.sleep(1)
self.assertEqual(3, i)
def test_garbage_collector_none_garbage(self):
limiter = Limiter()
@limiter.limit('3/minute', 'key')
def func():
pass
i = 0
with self.assertRaises(RateLimitExceeded):
for i in range(4):
func()
self.assertEqual(3, i)
|
example.py | #!/usr/bin/env python
__author__ = "Gabriel Bassett"
"""
AUTHOR: {0}
DATE: <DATE>
DEPENDENCIES: <a list of modules requiring installation>
Copyright <YEAR> {0}
LICENSE:
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
DESCRIPTION:
<ENTER DESCRIPTION>
""".format(__author__)
# PRE-USER SETUP
pass
########### NOT USER EDITABLE ABOVE THIS POINT #################
# USER VARIABLES
# Name of the yapsy plugin config file, resolved relative to this module's
# directory at import time (see SETUP below).
PLUGIN_CONFIG_FILE = "plugin_template.yapsy-plugin" # CHANGEME
# Fallback plugin name; overridden by the [Core] "name" option when the
# config file loads successfully.
NAME = "<NAME FROM CONFIG FILE AS BACKUP IF CONFIG FILE DOESN'T LOAD>" # CHANGEME
########### NOT USER EDITABLE BELOW THIS POINT #################
## IMPORTS
from yapsy.IPlugin import IPlugin
import logging
import networkx as nx
from datetime import datetime # timedelta imported above
import uuid
import ConfigParser
import inspect
import threading
"""
try:
import <SOME UNIQUE MODULE>
module_import_success = True
except:
module_import_success = False
logging.error("Module import failed. Please install the following module: <SOME UNIQUE MODULE>.")
"""
## SETUP
# Resolve the directory containing this module so the plugin config file can
# be found regardless of the current working directory.
loc = inspect.getfile(inspect.currentframe())
ind = loc.rfind("/")  # NOTE(review): assumes "/" separators; breaks on Windows paths
loc = loc[:ind+1]
# Python 2 ConfigParser API (SafeConfigParser/readfp).  NOTE(review): the
# open() handle is never closed explicitly.
config = ConfigParser.SafeConfigParser()
config.readfp(open(loc + PLUGIN_CONFIG_FILE))
# Pull overrides from the config file when the relevant sections exist.
if config.has_section('Core'):
    if 'name' in config.options('Core'):
        NAME = config.get('Core', 'name')
if config.has_section('Log'):
    if 'level' in config.options('Log'):
        LOGLEVEL = config.get('Log', 'level')
    if 'file' in config.options('Log'):
        LOGFILE = config.get('Log', 'file')
## EXECUTION
class PluginOne(IPlugin):
    """Template yapsy plugin class.

    Fill in the CHANGEME sections for the concrete plugin type (enrichment,
    interface, score, or minion).  configure() reports the plugin's identity
    to the plugin database; the other methods are type-specific entry points.
    """
    inputs = None  # enrichment input types (e.g. ['ip']); set from config for enrichment plugins
    shutdown = False  # Used to trigger shutdown of a minion
    thread = None  # minion worker thread; created by start(), cleared by stop(force=True)

    # CHANGEME: The init should contain anything to load modules or data files that should be variables of the plugin object
    def __init__(self):
        pass

    # CHANGEME: Configuration needs to set the values needed to identify the plugin in the plugin database as well as ensure everyhing loaded correctly
    # CHANGEME: Current layout is for an enrichment plugin
    # CHANGEME: enrichment [type, successful_load, name, description, inputs to enrichment such as 'ip', cost, speed]
    # CHANGEME: interface [type, successful_load, name]
    # CHANGEME: score [type, successful_load, name, description, cost, speed]
    # CHANGEME: minion [type, successful_load, name, description, cost]
    def configure(self):
        """
        :return: return list of configuration variables starting with [plugin_type, successful_load, name, description, <PLUGIN TYPE SPECIFIC VALUES>]
        """
        config_options = config.options("Configuration")
        # Cost and speed are not applicable to all plugin types.  Default them
        # here so the error returns below never reference undefined names
        # (previously the only assignments lived in commented-out code,
        # causing a NameError on every error path).
        cost = 9999
        speed = 9999
        """
        if 'cost' in config_options:
            cost = config.get('Configuration', 'cost')
        if 'speed' in config_options:
            speed = config.get('Configuration', 'speed')
        """
        if config.has_section('Documentation') and 'description' in config.options('Documentation'):
            # BUG FIX: previously read config.get('Configuration', 'type'),
            # which stored the plugin *type* as the description.
            description = config.get('Documentation', 'description')
        else:
            logging.error("'Description' not in config file.")
            # Keep the same 7-slot layout as every other return:
            # [plugin_type, successful_load, name, description, inputs, cost, speed]
            return [None, False, NAME, None, None, cost, speed]
        if 'type' in config_options:
            plugin_type = config.get('Configuration', 'type')
        else:
            logging.error("'Type' not specified in config file.")
            return [None, False, NAME, description, None, cost, speed]
        # Inputs is only applicable to enrichment plugins
        """
        if 'inputs' in config_options:
            self.inputs = config.get('Configuration', 'Inputs')
            self.inputs = [l.strip().lower() for l in self.inputs.split(",")]
        else:
            logging.error("No input types specified in config file.")
            return [plugin_type, False, NAME, description, None, cost, speed]
        """
        # Module success is only applicable to plugins which import unique code
        """
        if not module_import_success:
            logging.error("Module import failure caused configuration failure.")
            return [plugin_type, False, NAME, description, self.inputs, cost, speed]
        """
        return [plugin_type, True, NAME, description, self.inputs, cost, speed]

    ############ GENERAL NOTES ############
    # CHANGEME: All functions must implement a "configuration()" function
    # CHANGEME: The correct type of execution function must be defined for the type of plugin

    ############ GENERAL NOTES ############
    # CHANGEME: enrichment: "run(<thing to enrich>, inputs, start_time, any other plugin-specific attributes-MUST HAVE DEFAULTS)
    # CHANGEME: Enrichment plugin specifics:
    #  - Created nodes/edges must follow http://blog.infosecanalytics.com/2014/11/cyber-attack-graph-schema-cags-20.html
    #  - The enrichment should include a node for the <thing to enrich>
    #  - The enrichment should include a node for the enrichment which is is statically defined & key of "enrichment"
    #  - An edge should exist from <thing to enrich> to the enrichment node, created at the end after enrichment
    #  - Each enrichment datum should have a node
    #  - An edge should exist from <thing to enrich> to each enrichment datum
    #  - The run function should then return a networkx directed multi-graph including the nodes and edges
    def run(self, enrichment_target, inputs=None, start_time=""):
        """
        :param enrichment_target: a string containing a target to enrich
        :param inputs: optional list of input types to use for the enrichment
        :param start_time: optional start-time string for created nodes/edges
        :return: a networkx graph representing the sections of the domain
        """
        # BUG FIX: 'g' was returned without ever being defined (NameError).
        # Start from an empty directed multi-graph per the schema notes above.
        g = nx.MultiDiGraph()
        pass  # TODO: Place enrichment in here
        return g

    # CHANGEME: interface: enrich(graph, any other plugin-specific attributes-MUST HAVE DEFAULTS)
    # CHANGEME: query(topic, max_depth, config, dont_follow, any other plugin-specific attributes-MUST HAVE DEFAULTS)
    # CHANGEME: Interface plugin specifics:
    #  - In the most efficient way possible, merge nodes and edges into the storage medium
    #  - Merger of nodes should be done based on matching key & value.
    #  - URI should remain static for a given node.
    #  - Start time should be updated to the sending graph
    #  - Edges should be added w/o attempts to merge with edges in the storage back end
    #  - When adding nodes it is highly recommended to keep a node-to-storage-id mapping with a key of the node
    #  - URI. This will assist in bulk-adding the edges.
    #  - Query specifics of interface plugins:
    #  - In the most efficient way possible retrieve and return the merged subgraph (as a networkx graph) including all nodes and
    #  - edges within the max_distance from any node in the topic graph from the storage backend graph.
    #  - As a default, ['enrichment', 'classification'] should not be followed.
    #  - The query function must add a 'topic_distance' property to all nodes.
    def enrich(self, g):
        """
        :param g: networkx graph to be merged
        :return: Nonetype
        """
        pass  # TODO: Replace this with storage into a backend storage system

    # CHANGEME: score: score(subgraph, topic, any other plugin-specific attributes-MUST HAVE DEFAULTS)
    # CHANGEME: Score plugin specifics:
    #  - Scoring plugins should take a topic and networkx (sub)graph and return a dictionary keyed with the node (name) and with
    #  - values of the score assigned to the node for the given topic.
    def score(self, sg, topic):  # get_bayesian_network_probability
        """
        :param sg: egocentric subgraph around topic in networkx format
        :param topic: graph of topics
        :return: Dictionary of probabilities keyed by node
        """
        scores = dict()
        pass  # TODO: Replace with code to score the subgraph with respect to the topic
        return scores

    # CHANGEME: minion: minion()
    # CHANGEME: start()
    # CHANGEME: stop()
    # CHANGEME: isAlive()
    # CHANGEME: Minion plugin specifics:
    #  - Minions fit exist in a separate directory to prevent them importing themselves when they import their own VERUM instance
    #  - The minion configuration function must take an argument of the parent verum object. When not present, it shouldn't error but
    #  - instead return with successful_load set to false and a logging.info message that the parent was not passed in.
    #  - Must have 4 functions: minion(), start(), and stop() and isAlive()
    #  - minion() is the function which will be threaded. **Make sure to call create the new verum instance WITHIN this function
    #  - to avoid SQLite errors!**
    #  - start() creates the thread object as an attribute of the plugin class and starts it
    #  - stop() stops the thread. Preferably with both a normal exit by setting a shutdown variable of the plugin class as well as a
    #  - force stop option which removes the thread object
    #  - isAlive() calls the thread isAlive() function and returns the status
    def minion(self, *args, **xargs):
        """Worker loop run on the minion thread; must honor self.shutdown."""
        self.shutdown = False
        pass  # TODO: Write the function which will be threaded to form the minion

    def start(self, *args, **xargs):
        """Create the minion thread and start it."""
        self.thread = threading.Thread(target=self.minion, *args, **xargs)
        self.thread.start()

    def isAlive(self):
        """Return True when the minion thread exists and is running.

        Safe to call before start(): 'thread' defaults to None at class level.
        """
        if self.thread is None:
            return False
        else:
            return self.thread.isAlive()

    def stop(self, force=True):
        """Stop the minion.

        :param force: if True drop the thread object immediately; otherwise
            request a clean shutdown via the shutdown flag.
        """
        if force:
            self.thread = None  # zero out thread
        else:
            # BUG FIX: was 'self.shutdown = False', which could never stop the
            # minion loop.  True asks minion() to stop iterating.
            self.shutdown = True  # just don't iterate. May take up to (SLEEP_TIME) hours
test_stream.py | import gc
import threading
import unittest
import pytest
import cupy
from cupy._creation import from_data
from cupy import cuda
from cupy import testing
@testing.parameterize(
    *testing.product({
        'stream_name': ['null', 'ptds'],
    }))
@testing.gpu
class TestStream(unittest.TestCase):
    """cuda.Stream behaviour, parameterized over the two special streams:
    the legacy default ('null') and the per-thread default ('ptds')."""
    def setUp(self):
        # PTDS does not exist on HIP; skip that half of the parameterization.
        if cuda.runtime.is_hip and self.stream_name == 'ptds':
            self.skipTest('HIP does not support PTDS')
        # Remember the active stream so tearDown can restore it.
        self._prev_stream = cuda.get_current_stream()
        if self.stream_name == 'null':
            self.stream = cuda.Stream.null
        elif self.stream_name == 'ptds':
            self.stream = cuda.Stream.ptds
        self.stream.use()
    def tearDown(self):
        self._prev_stream.use()
    @unittest.skipIf(cuda.runtime.is_hip, 'This test is only for CUDA')
    def test_eq_cuda(self):
        # Streams constructed with the same null/ptds flag compare equal to
        # the corresponding special stream; distinct kinds compare unequal.
        null0 = self.stream
        if self.stream == cuda.Stream.null:
            null1 = cuda.Stream(True)
            null2 = cuda.Stream(True)
            null3 = cuda.Stream(ptds=True)
        else:
            null1 = cuda.Stream(ptds=True)
            null2 = cuda.Stream(ptds=True)
            null3 = cuda.Stream(True)
        null4 = cuda.Stream()
        assert null0 == null1
        assert null1 == null2
        assert null2 != null3
        assert null2 != null4
    @unittest.skipIf(not cuda.runtime.is_hip, 'This test is only for HIP')
    def test_eq_hip(self):
        # HIP has no PTDS, so only null vs. ordinary streams are compared.
        null0 = self.stream
        null1 = cuda.Stream(True)
        null2 = cuda.Stream(True)
        null3 = cuda.Stream()
        assert null0 == null1
        assert null1 == null2
        assert null2 != null3
    def check_del(self, null, ptds):
        # Deleting the current stream must leave the current-stream pointer
        # pointing at the same underlying stream until another is used.
        stream = cuda.Stream(null=null, ptds=ptds).use()
        assert stream is cuda.get_current_stream()
        stream_ptr = stream.ptr
        x = from_data.array([1, 2, 3])
        del stream
        assert stream_ptr == cuda.get_current_stream().ptr
        cuda.Stream.null.use()
        assert cuda.Stream.null is cuda.get_current_stream()
        # Want to test cudaStreamDestory is issued, but
        # runtime.streamQuery(stream_ptr) causes SEGV. We cannot test...
        del x
    def test_del_default(self):
        self.check_del(null=False, ptds=False)
    def test_del(self):
        # Run check_del with flags matching the parameterized special stream.
        null = self.stream == cuda.Stream.null
        if cuda.runtime.is_hip:
            ptds = False
        else:
            ptds = self.stream == cuda.Stream.ptds
        self.check_del(null=null, ptds=ptds)
    def test_get_and_add_callback(self):
        # Callbacks must fire in order with the expected payload and stream.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
        if not cuda.runtime.is_hip:
            stream = self.stream
        else:
            # adding callbacks to the null stream in HIP would segfault...
            stream = cuda.Stream()
        out = []
        stream_list = []
        def _callback(s, _, t):
            out.append(t[0])
            stream_list.append(s.ptr)
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.add_callback(
                _callback,
                (i, numpy_array))
        stream.synchronize()
        assert out == list(range(N))
        assert all(s == stream.ptr for s in stream_list)
    @unittest.skipIf(cuda.runtime.is_hip,
                     'HIP does not support launch_host_func')
    @unittest.skipIf(cuda.driver.get_build_version() < 10000,
                     'Only CUDA 10.0+ supports this')
    def test_launch_host_func(self):
        # Same ordering guarantee as add_callback, via cudaLaunchHostFunc.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
        stream = cuda.Stream.null
        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.launch_host_func(
                lambda t: out.append(t[0]), (i, numpy_array))
        stream.synchronize()
        assert out == list(range(N))
    def test_with_statement(self):
        # Nested `with` restores the outer stream, but after leaving the
        # outermost context the current stream falls back to null, not to
        # the stream that setUp() activated.
        stream1 = cuda.Stream()
        stream2 = cuda.Stream()
        assert self.stream == cuda.get_current_stream()
        with stream1:
            assert stream1 == cuda.get_current_stream()
            with stream2:
                assert stream2 == cuda.get_current_stream()
            assert stream1 == cuda.get_current_stream()
        # self.stream is "forgotten"!
        assert cuda.Stream.null == cuda.get_current_stream()
    def test_use(self):
        # use() switches the current stream imperatively (no context manager).
        stream1 = cuda.Stream().use()
        assert stream1 == cuda.get_current_stream()
        self.stream.use()
        assert self.stream == cuda.get_current_stream()
    @testing.multi_gpu(2)
    def test_per_device(self):
        # The current stream is tracked per device: switching devices inside
        # a stream context reveals that device's own current stream.
        with cuda.Device(0):
            stream0 = cuda.Stream()
            with stream0:
                assert stream0 == cuda.get_current_stream()
                with cuda.Device(1):
                    assert stream0 != cuda.get_current_stream()
                    assert cuda.Stream.null == cuda.get_current_stream()
                assert stream0 == cuda.get_current_stream()
    @testing.multi_gpu(2)
    def test_per_device_failure(self):
        # Using a stream on a device other than the one it was created on
        # must raise.
        with cuda.Device(0):
            stream0 = cuda.Stream()
        with cuda.Device(1):
            with pytest.raises(RuntimeError):
                with stream0:
                    pass
            with pytest.raises(RuntimeError):
                stream0.use()
    def test_mix_use_context(self):
        # See cupy/cupy#5143
        s1 = cuda.Stream()
        s2 = cuda.Stream()
        s3 = cuda.Stream()
        assert cuda.get_current_stream() == self.stream
        with s1:
            assert cuda.get_current_stream() == s1
            s2.use()
            assert cuda.get_current_stream() == s2
            with s3:
                assert cuda.get_current_stream() == s3
            # Exiting s3's context restores s1 even though s2 was the last
            # use()d stream; deleting s2 must not disturb that.
            del s2
            assert cuda.get_current_stream() == s1
        # self.stream is "forgotten"!
        assert cuda.get_current_stream() == cuda.Stream.null
    def test_stream_thread(self):
        # NOTE(review): the worker functions declare `global s1`, so they
        # share the MODULE-level s1, not this local; the local s1 below only
        # exists so `del s1` at the end cleans it up.
        s1 = None
        def f1(barrier, errors):
            global s1
            tid = barrier.wait()
            try:
                s1 = cuda.Stream()
                barrier.wait()  # until t2 starts
                s1.use()
                barrier.wait()  # until t2 uses the stream
                s1 = None
                gc.collect()
                barrier.wait()  # until t2 decrefs the stream
                assert cuda.get_current_stream() is not None
                cupy.arange(10)
                errors[tid] = False
            except Exception as e:
                print(f'error in {tid}: {e}')
        def f2(barrier, errors):
            global s1
            tid = barrier.wait()
            try:
                barrier.wait()  # until t1 creates the stream
                s1.use()
                barrier.wait()  # until t1 uses the stream
                s1 = None
                gc.collect()
                barrier.wait()  # until t1 decrefs the stream
                assert cuda.get_current_stream() is not None
                cupy.arange(10)
                errors[tid] = False
            except Exception as e:
                print(f'error in {tid}: {e}')
        barrier = threading.Barrier(2)
        errors = [True, True]
        threads = [
            threading.Thread(target=f1, args=(barrier, errors), daemon=True),
            threading.Thread(target=f2, args=(barrier, errors), daemon=True),
        ]
        del s1
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        # Each worker flips its slot to False only if it reached the end.
        for err in errors:
            assert err is False
@testing.gpu
class TestExternalStream(unittest.TestCase):
    """Wrapping a raw CUDA runtime stream pointer in cuda.ExternalStream."""
    def setUp(self):
        # Create a raw runtime stream and wrap it; tearDown destroys it.
        self.stream_ptr = cuda.runtime.streamCreate()
        self.stream = cuda.ExternalStream(self.stream_ptr)
    def tearDown(self):
        cuda.runtime.streamDestroy(self.stream_ptr)
    def test_get_and_add_callback(self):
        # Callbacks on an external stream must fire in submission order.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
        stream = self.stream
        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.add_callback(
                lambda _, __, t: out.append(t[0]),
                (i, numpy_array))
        stream.synchronize()
        assert out == list(range(N))
    @unittest.skipIf(cuda.runtime.is_hip,
                     'HIP does not support launch_host_func')
    @unittest.skipIf(cuda.driver.get_build_version() < 10000,
                     'Only CUDA 10.0+ supports this')
    def test_launch_host_func(self):
        # Same ordering guarantee via cudaLaunchHostFunc.
        N = 100
        cupy_arrays = [testing.shaped_random((2, 3)) for _ in range(N)]
        stream = self.stream
        out = []
        for i in range(N):
            numpy_array = cupy_arrays[i].get(stream=stream)
            stream.launch_host_func(
                lambda t: out.append(t[0]), (i, numpy_array))
        stream.synchronize()
        assert out == list(range(N))
|
manager.py | from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from chia.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
stream_plot_info_pk,
stream_plot_info_ph,
)
from chia.util.generator_tools import list_to_batches
from chia.util.ints import uint16
from chia.util.path import mkdir
from chia.util.streamable import Streamable, streamable
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.wallet.derive_keys import master_sk_to_local_sk
log = logging.getLogger(__name__)
# Version stamp of the on-disk plot cache format; Cache.load() rejects files
# whose stored version differs.
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
    """Serializable per-plot key material, cached to avoid re-deriving it."""
    # NOTE(review): presumably exactly one of pool_public_key /
    # pool_contract_puzzle_hash is set depending on plot type — confirm
    # against the code that constructs CacheEntry.
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
    """On-disk representation of the plot cache: format version plus the
    (plot_id, CacheEntry) pairs."""
    version: uint16
    data: List[Tuple[bytes32, CacheEntry]]
class Cache:
    """Plot-id -> CacheEntry map, persisted to disk as a DiskCache blob."""
    _changed: bool
    _data: Dict[bytes32, CacheEntry]

    def __init__(self, path: Path):
        self._path = path
        self._data = {}
        self._changed = False
        # Make sure the cache directory exists before the first save().
        if not path.parent.exists():
            mkdir(path.parent)

    def __len__(self):
        return len(self._data)

    def update(self, plot_id: bytes32, entry: CacheEntry):
        """Insert or replace an entry and mark the cache dirty."""
        self._data[plot_id] = entry
        self._changed = True

    def remove(self, cache_keys: List[bytes32]):
        """Drop the given keys; only keys actually present dirty the cache."""
        missing = object()
        for cache_key in cache_keys:
            if self._data.pop(cache_key, missing) is not missing:
                self._changed = True

    def save(self):
        """Serialize every entry to self._path; failures are logged, not raised."""
        try:
            serialized: bytes = bytes(DiskCache(CURRENT_VERSION, list(self.items())))
            self._path.write_bytes(serialized)
            self._changed = False
            log.info(f"Saved {len(serialized)} bytes of cached data")
        except Exception as e:
            log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")

    def load(self):
        """Load entries from disk; a missing file or bad data leaves the
        in-memory cache untouched (errors are logged, not raised)."""
        try:
            serialized = self._path.read_bytes()
            log.info(f"Loaded {len(serialized)} bytes of cached data")
            stored_cache: DiskCache = DiskCache.from_bytes(serialized)
            if stored_cache.version != CURRENT_VERSION:
                # TODO, Migrate or drop current cache if the version changes.
                raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
            self._data = dict(stored_cache.data)
        except FileNotFoundError:
            log.debug(f"Cache {self._path} not found")
        except Exception as e:
            log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")

    def keys(self):
        return self._data.keys()

    def items(self):
        return self._data.items()

    def get(self, plot_id):
        return self._data.get(plot_id)

    def changed(self):
        return self._changed

    def path(self):
        return self._path
class PlotManager:
plots: Dict[Path, PlotInfo]
plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
plot_filename_paths_lock: threading.Lock
failed_to_open_filenames: Dict[Path, int]
no_key_filenames: Set[Path]
farmer_public_keys: List[G1Element]
pool_public_keys: List[G1Element]
cache: Cache
match_str: Optional[str]
show_memo: bool
open_no_key_filenames: bool
last_refresh_time: float
refresh_parameter: PlotsRefreshParameter
log: Any
_lock: threading.Lock
_refresh_thread: Optional[threading.Thread]
_refreshing_enabled: bool
_refresh_callback: Callable
    def __init__(
        self,
        root_path: Path,
        refresh_callback: Callable,
        match_str: Optional[str] = None,
        show_memo: bool = False,
        open_no_key_filenames: bool = False,
        refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
    ):
        """Create a plot manager rooted at *root_path*.

        :param root_path: base directory; the cache file lives at
            <root_path>/cache/plot_manager.dat
        :param refresh_callback: invoked with (PlotRefreshEvents, PlotRefreshResult)
            as the background refresh makes progress
        :param match_str: if set, presumably restricts processing to matching
            filenames — confirm against refresh_batch
        :param show_memo: presumably controls memo display during refresh — confirm
        :param open_no_key_filenames: presumably re-opens plots with unknown keys — confirm
        :param refresh_parameter: batching/interval tuning.  NOTE(review): this
            default instance is created once at import time and shared by every
            PlotManager that does not pass its own.
        """
        self.root_path = root_path
        self.plots = {}
        self.plot_filename_paths = {}
        self.plot_filename_paths_lock = threading.Lock()
        self.failed_to_open_filenames = {}
        self.no_key_filenames = set()
        self.farmer_public_keys = []
        self.pool_public_keys = []
        self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
        self.match_str = match_str
        self.show_memo = show_memo
        self.open_no_key_filenames = open_no_key_filenames
        self.last_refresh_time = 0
        self.refresh_parameter = refresh_parameter
        self.log = logging.getLogger(__name__)
        self._lock = threading.Lock()
        self._refresh_thread = None
        self._refreshing_enabled = False
        self._refresh_callback = refresh_callback  # type: ignore
    def __enter__(self):
        # Acquire the manager lock.  NOTE: returns None, so `with manager:`
        # works but `with manager as m:` binds None.
        self._lock.acquire()
    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Release the manager lock regardless of exceptions in the body.
        self._lock.release()
def reset(self):
self.last_refresh_time = time.time()
self.plots.clear()
self.plot_filename_paths.clear()
self.failed_to_open_filenames.clear()
self.no_key_filenames.clear()
    def set_refresh_callback(self, callback: Callable):
        """Replace the progress callback used by the refresh thread."""
        self._refresh_callback = callback  # type: ignore
    def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
        """Store the farmer/pool public keys (see public_keys_available())."""
        self.farmer_public_keys = farmer_public_keys
        self.pool_public_keys = pool_public_keys
    def public_keys_available(self):
        # Truthy only when both key lists are non-empty.  NOTE: returns an
        # int (result of `len(...) and len(...)`), not a bool.
        return len(self.farmer_public_keys) and len(self.pool_public_keys)
    def plot_count(self):
        # Snapshot the count under the manager lock (taken via __enter__).
        with self:
            return len(self.plots)
def get_duplicates(self):
result = []
for plot_filename, paths_entry in self.plot_filename_paths.items():
_, duplicated_paths = paths_entry
for path in duplicated_paths:
result.append(Path(path) / plot_filename)
return result
    def needs_refresh(self) -> bool:
        # True once the configured interval has elapsed since the last refresh
        # (trigger_refresh() zeroes last_refresh_time to force this).
        return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)
    def start_refreshing(self):
        """Enable refreshing and start the background refresh thread.

        Loads the on-disk cache first.  Safe to call while a thread is already
        running: a second thread is not started.
        """
        self._refreshing_enabled = True
        if self._refresh_thread is None or not self._refresh_thread.is_alive():
            self.cache.load()
            self._refresh_thread = threading.Thread(target=self._refresh_task)
            self._refresh_thread.start()
    def stop_refreshing(self):
        """Signal the refresh thread to stop and block until it has exited."""
        self._refreshing_enabled = False
        if self._refresh_thread is not None and self._refresh_thread.is_alive():
            self._refresh_thread.join()
            self._refresh_thread = None
    def trigger_refresh(self):
        """Make the next needs_refresh() check pass immediately."""
        log.debug("trigger_refresh")
        self.last_refresh_time = 0
def _refresh_task(self):
    # Background loop run by start_refreshing(): waits until a refresh is due,
    # rescans the configured plot directories, drops plots that disappeared,
    # loads new ones in batches via refresh_batch(), and reports progress
    # through self._refresh_callback.  Exits when stop_refreshing() clears
    # self._refreshing_enabled.
    try:
        while self._refreshing_enabled:
            # Poll once per second until the refresh interval elapsed
            # (or trigger_refresh() zeroed the timestamp) or we are stopped.
            while not self.needs_refresh() and self._refreshing_enabled:
                time.sleep(1)
            if not self._refreshing_enabled:
                return
            plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
            plot_directories: Set[Path] = set(plot_filenames.keys())
            plot_paths: List[Path] = []
            for paths in plot_filenames.values():
                plot_paths += paths
            total_result: PlotRefreshResult = PlotRefreshResult()
            total_size = len(plot_paths)
            self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))
            # First drop all plots we have in plot_filename_paths but no longer in the filesystem or set in config
            def plot_removed(test_path: Path):
                # Removed = gone from disk, or its directory is no longer configured.
                return not test_path.exists() or test_path.parent not in plot_directories
            filenames_to_remove: List[str] = []
            for plot_filename, paths_entry in self.plot_filename_paths.items():
                loaded_path, duplicated_paths = paths_entry
                loaded_plot = Path(loaded_path) / Path(plot_filename)
                if plot_removed(loaded_plot):
                    filenames_to_remove.append(plot_filename)
                    if loaded_plot in self.plots:
                        del self.plots[loaded_plot]
                    total_result.removed.append(loaded_plot)
                    # No need to check the duplicates here since we drop the whole entry
                    continue
                paths_to_remove: List[str] = []
                for path in duplicated_paths:
                    loaded_plot = Path(path) / Path(plot_filename)
                    if plot_removed(loaded_plot):
                        paths_to_remove.append(path)
                        total_result.removed.append(loaded_plot)
                for path in paths_to_remove:
                    duplicated_paths.remove(path)
            for filename in filenames_to_remove:
                del self.plot_filename_paths[filename]
            # Load what remains, one batch at a time, reporting after each batch.
            for remaining, batch in list_to_batches(plot_paths, self.refresh_parameter.batch_size):
                batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
                if not self._refreshing_enabled:
                    self.log.debug("refresh_plots: Aborted")
                    break
                # Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
                batch_result.remaining = remaining
                total_result.loaded += batch_result.loaded
                total_result.processed += batch_result.processed
                total_result.duration += batch_result.duration
                self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
                if remaining == 0:
                    break
                batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
                self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
                time.sleep(float(batch_sleep) / 1000.0)
            if self._refreshing_enabled:
                self._refresh_callback(PlotRefreshEvents.done, total_result)
            # Cleanup unused cache: drop cache entries whose plot id is no longer loaded.
            available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
            invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
            self.cache.remove(invalid_cache_keys)
            self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")
            if self.cache.changed():
                self.cache.save()
            self.last_refresh_time = time.time()
            self.log.debug(
                f"_refresh_task: total_result.loaded {len(total_result.loaded)}, "
                f"total_result.removed {len(total_result.removed)}, "
                f"total_duration {total_result.duration:.2f} seconds"
            )
    except Exception as e:
        # Any failure (including one raised by the callback) aborts the loop
        # and resets the manager to a clean state.
        log.error(f"_refresh_callback raised: {e} with the traceback: {traceback.format_exc()}")
        self.reset()
def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
    """Open and validate one batch of plot files concurrently.

    Successfully loaded plots are merged into self.plots; the returned
    PlotRefreshResult carries what was loaded/processed and the duration
    (`remaining` is filled in by the caller, _refresh_task()).
    """
    start_time: float = time.time()
    result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
    counter_lock = threading.Lock()  # guards result.loaded across worker threads
    log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")
    if self.match_str is not None:
        log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')
    def process_file(file_path: Path) -> Optional[PlotInfo]:
        # Worker run in the thread pool below: returns a PlotInfo for a
        # usable plot, or None when the file should be skipped.
        if not self._refreshing_enabled:
            return None
        filename_str = str(file_path)
        if self.match_str is not None and self.match_str not in filename_str:
            return None
        if (
            file_path in self.failed_to_open_filenames
            and (time.time() - self.failed_to_open_filenames[file_path])
            < self.refresh_parameter.retry_invalid_seconds
        ):
            # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
            return None
        if file_path in self.plots:
            # Already loaded by a previous refresh.
            return self.plots[file_path]
        entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
        if entry is not None:
            loaded_parent, duplicates = entry
            if str(file_path.parent) in duplicates:
                log.debug(f"Skip duplicated plot {str(file_path)}")
                return None
        try:
            if not file_path.exists():
                return None
            prover = DiskProver(str(file_path))
            log.debug(f"process_file {str(file_path)}")
            expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
            stat_info = file_path.stat()
            # TODO: consider checking if the file was just written to (which would mean that the file is still
            # being copied). A segfault might happen in this edge case.
            if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                log.warning(
                    f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                    f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                )
                return None
            cache_entry = self.cache.get(prover.get_id())
            if cache_entry is None:
                # Not cached: parse the plot memo and derive the plot keys.
                (
                    pool_public_key_or_puzzle_hash,
                    farmer_public_key,
                    local_master_sk,
                ) = parse_plot_info(prover.get_memo())
                # Only use plots that have the correct keys associated with them
                if farmer_public_key not in self.farmer_public_keys:
                    log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                    self.no_key_filenames.add(file_path)
                    if not self.open_no_key_filenames:
                        return None
                pool_public_key: Optional[G1Element] = None
                pool_contract_puzzle_hash: Optional[bytes32] = None
                if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                    pool_public_key = pool_public_key_or_puzzle_hash
                else:
                    assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                    pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash
                if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                    log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                    self.no_key_filenames.add(file_path)
                    if not self.open_no_key_filenames:
                        return None
                local_sk = master_sk_to_local_sk(local_master_sk)
                plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                    local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
                )
                cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
                self.cache.update(prover.get_id(), cache_entry)
            with self.plot_filename_paths_lock:
                paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
                if paths is None:
                    # First time we see this filename: remember its directory.
                    paths = (str(Path(prover.get_filename()).parent), set())
                    self.plot_filename_paths[file_path.name] = paths
                else:
                    # Same filename in a second directory: record it as a
                    # duplicate and skip loading this copy.
                    paths[1].add(str(Path(prover.get_filename()).parent))
                    log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                    return None
            new_plot_info: PlotInfo = PlotInfo(
                prover,
                cache_entry.pool_public_key,
                cache_entry.pool_contract_puzzle_hash,
                cache_entry.plot_public_key,
                stat_info.st_size,
                stat_info.st_mtime,
            )
            with counter_lock:
                result.loaded.append(new_plot_info)
            if file_path in self.failed_to_open_filenames:
                # Successful open clears any earlier failure record.
                del self.failed_to_open_filenames[file_path]
        except Exception as e:
            tb = traceback.format_exc()
            log.error(f"Failed to open file {file_path}. {e} {tb}")
            # Remember the failure time so the retry back-off above applies.
            self.failed_to_open_filenames[file_path] = int(time.time())
            return None
        log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
        if self.show_memo:
            plot_memo: bytes32
            if pool_contract_puzzle_hash is None:
                plot_memo = stream_plot_info_pk(pool_public_key, farmer_public_key, local_master_sk)
            else:
                plot_memo = stream_plot_info_ph(pool_contract_puzzle_hash, farmer_public_key, local_master_sk)
            plot_memo_str: str = plot_memo.hex()
            log.info(f"Memo: {plot_memo_str}")
        return new_plot_info
    with self, ThreadPoolExecutor() as executor:
        # Process the batch concurrently, then publish results while holding
        # the manager lock (`with self`).
        plots_refreshed: Dict[Path, PlotInfo] = {}
        for new_plot in executor.map(process_file, plot_paths):
            if new_plot is not None:
                plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
        self.plots.update(plots_refreshed)
    result.duration = time.time() - start_time
    self.log.debug(
        f"refresh_batch: loaded {len(result.loaded)}, "
        f"removed {len(result.removed)}, processed {result.processed}, "
        f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
        f"duration: {result.duration:.2f} seconds"
    )
    return result
|
http_server.py | import threading
from collections import defaultdict
from http import HTTPStatus
from http.server import HTTPServer, BaseHTTPRequestHandler
from urllib.parse import urlparse
import pytest
class TestHandler(BaseHTTPRequestHandler):
    """Request handler whose routes are registered declaratively.

    Routes live in a class-level mapping ``{method: {path: function}}``
    filled via the :meth:`handler` decorator; GET and POST both dispatch
    through :meth:`do_generic`.
    """

    handlers = defaultdict(dict)

    @classmethod
    def handler(cls, method, path):
        """Return a decorator that registers its function for method+path."""
        def register(func):
            cls.handlers[method][path] = func
            return func
        return register

    def do_generic(self):
        """Dispatch to the registered handler, or answer 404."""
        route = urlparse(self.path).path
        handler_func = self.handlers[self.command].get(route)
        if handler_func is None:
            return self.send_error(HTTPStatus.NOT_FOUND)
        return handler_func(self)

    do_GET = do_generic
    do_POST = do_generic
@TestHandler.handler('GET', '/headers')
def get_headers(handler):
    """Echo every request header back as a response header, empty body."""
    handler.send_response(200)
    for name, val in handler.headers.items():
        handler.send_header(name, val)
    handler.send_header('Content-Length', 0)
    handler.end_headers()
@TestHandler.handler('GET', '/drip')
def chunked_drip(handler):
    """Drip the body ``test`` + newline three times as separate HTTP chunks,
    mirroring the request's Accept header as Content-Type when present."""
    handler.send_response(200)
    accept_header = handler.headers.get('Accept')
    if accept_header is not None:
        handler.send_header('Content-Type', accept_header)
    handler.send_header('Transfer-Encoding', 'chunked')
    handler.end_headers()
    body = 'test\n'
    encoded_chunk = f'{len(body):X}\r\n{body}\r\n'.encode('utf-8')
    for _ in range(3):
        handler.wfile.write(encoded_chunk)
    # Zero-length chunk terminates the chunked stream.
    handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@TestHandler.handler('GET', '/stream/encoding/random')
def random_encoding(handler):
    """Stream a fixed mix of ASCII then Unicode bodies as chunked responses."""
    from tests.fixtures import ASCII_FILE_CONTENT, FILE_CONTENT as UNICODE_FILE_CONTENT
    handler.send_response(200)
    handler.send_header('Transfer-Encoding', 'chunked')
    handler.end_headers()
    bodies = [ASCII_FILE_CONTENT] * 2 + [UNICODE_FILE_CONTENT] * 3
    for body in bodies:
        body += "\n"
        # Chunk length must count encoded bytes, not characters.
        handler.wfile.write(f'{len(body.encode()):X}\r\n{body}\r\n'.encode())
    handler.wfile.write('0\r\n\r\n'.encode('utf-8'))
@TestHandler.handler('POST', '/status/msg')
def status_custom_msg(handler):
    """Respond 200 using the POSTed body as the HTTP status message."""
    length = int(handler.headers.get('content-length', 0))
    body = handler.rfile.read(length).decode()
    handler.send_response(200, body)
    handler.end_headers()
@pytest.fixture(scope="function")
def http_server():
"""A custom HTTP server implementation for our tests, that is
built on top of the http.server module. Handy when we need to
deal with details which httpbin can not capture."""
server = HTTPServer(('localhost', 0), TestHandler)
thread = threading.Thread(target=server.serve_forever)
thread.start()
yield '{}:{}'.format(*server.socket.getsockname())
server.shutdown()
thread.join(timeout=0.5)
|
test.py | #!/usr/bin/env python
#
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import imp
import optparse
import os
from os.path import join, dirname, abspath, basename, isdir, exists
import platform
import re
import signal
import subprocess
import sys
import tempfile
import time
import threading
from Queue import Queue, Empty
sys.path.append(dirname(__file__) + "/../deps/v8/tools");
import utils
VERBOSE = False
# ---------------------------------------------
# --- P r o g r e s s I n d i c a t o r s ---
# ---------------------------------------------
class ProgressIndicator(object):
  """Runs the queued test cases on N threads, tracking pass/fail counts.

  Subclasses implement Starting(), Done(), AboutToRun() and HasRun() to
  render progress in different styles (verbose, dots, color, mono).
  """

  def __init__(self, cases):
    self.cases = cases
    # Work queue pre-filled with every case; worker threads drain it.
    self.queue = Queue(len(cases))
    for case in cases:
      self.queue.put_nowait(case)
    self.succeeded = 0
    self.remaining = len(cases)
    self.total = len(cases)
    self.failed = [ ]
    self.crashed = 0
    # Set to True to make worker threads stop picking up new cases.
    self.terminate = False
    # Guards the counters above and the AboutToRun/HasRun callbacks.
    self.lock = threading.Lock()

  def PrintFailureHeader(self, test):
    # Prints the "=== label ===" banner plus the test path for a failure.
    if test.IsNegative():
      negative_marker = '[negative] '
    else:
      negative_marker = ''
    print "=== %(label)s %(negative)s===" % {
      'label': test.GetLabel(),
      'negative': negative_marker
    }
    print "Path: %s" % "/".join(test.path)

  def Run(self, tasks):
    """Run all cases on `tasks` threads; returns True iff nothing failed."""
    self.Starting()
    threads = []
    # Spawn N-1 threads and then use this thread as the last one.
    # That way -j1 avoids threading altogether which is a nice fallback
    # in case of threading problems.
    for i in xrange(tasks - 1):
      thread = threading.Thread(target=self.RunSingle, args=[])
      threads.append(thread)
      thread.start()
    try:
      self.RunSingle()
      # Wait for the remaining threads
      for thread in threads:
        # Use a timeout so that signals (ctrl-c) will be processed.
        thread.join(timeout=10000000)
    except Exception, e:
      # If there's an exception we schedule an interruption for any
      # remaining threads.
      self.terminate = True
      # ...and then reraise the exception to bail out
      raise
    self.Done()
    return not self.failed

  def RunSingle(self):
    # Worker loop: pull cases off the queue and run them until it is empty
    # or terminate is set.
    while not self.terminate:
      try:
        test = self.queue.get_nowait()
      except Empty:
        return
      case = test.case
      self.lock.acquire()
      self.AboutToRun(case)
      self.lock.release()
      try:
        start = time.time()
        output = case.Run()
        case.duration = (time.time() - start)
      except IOError, e:
        # Only expected while shutting down (terminate set elsewhere).
        assert self.terminate
        return
      if self.terminate:
        return
      self.lock.acquire()
      if output.UnexpectedOutput():
        self.failed.append(output)
        if output.HasCrashed():
          self.crashed += 1
      else:
        self.succeeded += 1
      self.remaining -= 1
      self.HasRun(output)
      self.lock.release()
def EscapeCommand(command):
  """Join the parts of `command` into one display string, double-quoting
  any part that contains a space.  (More characters may need escaping for
  full shell correctness.)"""
  quoted = []
  for part in command:
    quoted.append('"%s"' % part if ' ' in part else part)
  return " ".join(quoted)
class SimpleProgressIndicator(ProgressIndicator):
  """Line-per-event progress output; prints a failure summary at the end."""

  def Starting(self):
    print 'Running %i tests' % len(self.cases)

  def Done(self):
    # Dump stderr/stdout/command for every failure, then an overall summary.
    print
    for failed in self.failed:
      self.PrintFailureHeader(failed.test)
      if failed.output.stderr:
        print "--- stderr ---"
        print failed.output.stderr.strip()
      if failed.output.stdout:
        print "--- stdout ---"
        print failed.output.stdout.strip()
      print "Command: %s" % EscapeCommand(failed.command)
      if failed.HasCrashed():
        print "--- CRASHED ---"
      if failed.HasTimedOut():
        print "--- TIMEOUT ---"
    if len(self.failed) == 0:
      print "==="
      print "=== All tests succeeded"
      print "==="
    else:
      print
      print "==="
      print "=== %i tests failed" % len(self.failed)
      if self.crashed > 0:
        print "=== %i tests CRASHED" % self.crashed
      print "==="
class VerboseProgressIndicator(SimpleProgressIndicator):
  """Prints one line as each test starts and one as it finishes."""

  def AboutToRun(self, case):
    print 'Starting %s...' % case.GetLabel()
    sys.stdout.flush()

  def HasRun(self, output):
    # Outcome string: CRASH/FAIL for unexpected results, 'pass' otherwise.
    if output.UnexpectedOutput():
      if output.HasCrashed():
        outcome = 'CRASH'
      else:
        outcome = 'FAIL'
    else:
      outcome = 'pass'
    print 'Done running %s: %s' % (output.test.GetLabel(), outcome)
class DotsProgressIndicator(SimpleProgressIndicator):
  """One character per finished test: '.', or C/T/F for crash/timeout/fail."""

  def AboutToRun(self, case):
    pass

  def HasRun(self, output):
    total = self.succeeded + len(self.failed)
    # Wrap the dot line every 50 results.
    if (total > 1) and (total % 50 == 1):
      sys.stdout.write('\n')
    if not output.UnexpectedOutput():
      mark = '.'
    elif output.HasCrashed():
      mark = 'C'
    elif output.HasTimedOut():
      mark = 'T'
    else:
      mark = 'F'
    sys.stdout.write(mark)
    sys.stdout.flush()
class CompactProgressIndicator(ProgressIndicator):
  """Single status line, redrawn in place; subclasses supply templates."""

  def __init__(self, cases, templates):
    super(CompactProgressIndicator, self).__init__(cases)
    # 'status_line', 'stdout' and 'stderr' format templates.
    self.templates = templates
    # Length of the last printed status line, used by ClearLine().
    self.last_status_length = 0
    self.start_time = time.time()

  def Starting(self):
    pass

  def Done(self):
    self.PrintProgress('Done')

  def AboutToRun(self, case):
    self.PrintProgress(case.GetLabel())

  def HasRun(self, output):
    # Only unexpected results produce extra output; passes just update the line.
    if output.UnexpectedOutput():
      self.ClearLine(self.last_status_length)
      self.PrintFailureHeader(output.test)
      stdout = output.output.stdout.strip()
      if len(stdout):
        print self.templates['stdout'] % stdout
      stderr = output.output.stderr.strip()
      if len(stderr):
        print self.templates['stderr'] % stderr
      print "Command: %s" % EscapeCommand(output.command)
      if output.HasCrashed():
        print "--- CRASHED ---"
      if output.HasTimedOut():
        print "--- TIMEOUT ---"

  def Truncate(self, str, length):
    # Shorten `str` to at most `length` characters, ellipsis included.
    if length and (len(str) > (length - 3)):
      return str[:(length-3)] + "..."
    else:
      return str

  def PrintProgress(self, name):
    """Redraw the status line: elapsed time, percent done, pass/fail counts."""
    self.ClearLine(self.last_status_length)
    elapsed = time.time() - self.start_time
    status = self.templates['status_line'] % {
      'passed': self.succeeded,
      'remaining': (((self.total - self.remaining) * 100) // self.total),
      'failed': len(self.failed),
      'test': name,
      'mins': int(elapsed) / 60,
      'secs': int(elapsed) % 60
    }
    status = self.Truncate(status, 78)
    self.last_status_length = len(status)
    print status,
    sys.stdout.flush()
class ColorProgressIndicator(CompactProgressIndicator):
  """Compact indicator using ANSI colors (blue %, green +, red -)."""

  def __init__(self, cases):
    templates = {
      'status_line': "[%(mins)02i:%(secs)02i|\033[34m%%%(remaining) 4d\033[0m|\033[32m+%(passed) 4d\033[0m|\033[31m-%(failed) 4d\033[0m]: %(test)s",
      'stdout': "\033[1m%s\033[0m",
      'stderr': "\033[31m%s\033[0m",
    }
    super(ColorProgressIndicator, self).__init__(cases, templates)

  def ClearLine(self, last_line_length):
    # ANSI "erase line" + carriage return.
    print "\033[1K\r",
class MonochromeProgressIndicator(CompactProgressIndicator):
  """Compact indicator without ANSI escapes, for dumb terminals."""

  def __init__(self, cases):
    templates = {
      'status_line': "[%(mins)02i:%(secs)02i|%%%(remaining) 4d|+%(passed) 4d|-%(failed) 4d]: %(test)s",
      'stdout': '%s',
      'stderr': '%s',
      'clear': lambda last_line_length: ("\r" + (" " * last_line_length) + "\r"),
      'max_length': 78
    }
    super(MonochromeProgressIndicator, self).__init__(cases, templates)

  def ClearLine(self, last_line_length):
    # Overwrite the previous line with spaces, then return to column 0.
    print ("\r" + (" " * last_line_length) + "\r"),
# Maps the --progress command-line value to its indicator class.
PROGRESS_INDICATORS = {
  'verbose': VerboseProgressIndicator,
  'dots': DotsProgressIndicator,
  'color': ColorProgressIndicator,
  'mono': MonochromeProgressIndicator
}
# -------------------------
# --- F r a m e w o r k ---
# -------------------------
class CommandOutput(object):
  """Value object holding the result of one executed command."""

  def __init__(self, exit_code, timed_out, stdout, stderr):
    self.stdout = stdout
    self.stderr = stderr
    self.exit_code = exit_code
    self.timed_out = timed_out
    # Lazily filled in by TestCase.DidFail().
    self.failed = None
class TestCase(object):
  """One runnable test; subclasses provide GetCommand()/GetLabel() etc."""

  def __init__(self, context, path, mode):
    self.path = path
    self.context = context
    self.duration = None
    self.mode = mode

  def IsNegative(self):
    # Negative tests are expected to fail; subclasses may override.
    return False

  def CompareTime(self, other):
    # Sort key (py2 cmp protocol): longer-running tests sort first.
    return cmp(other.duration, self.duration)

  def DidFail(self, output):
    # Caches the failure check on the output object.
    if output.failed is None:
      output.failed = self.IsFailureOutput(output)
    return output.failed

  def IsFailureOutput(self, output):
    # Default failure criterion: non-zero exit code.
    return output.exit_code != 0

  def GetSource(self):
    return "(no source available)"

  def RunCommand(self, command):
    """Run `command` (after context preprocessing) and wrap the result."""
    full_command = self.context.processor(command)
    output = Execute(full_command,
                     self.context,
                     self.context.GetTimeout(self.mode))
    self.Cleanup()
    return TestOutput(self,
                      full_command,
                      output,
                      self.context.store_unexpected_output)

  def BeforeRun(self):
    pass

  def AfterRun(self, result):
    pass

  def Run(self):
    # BeforeRun / RunCommand / AfterRun; AfterRun always runs via finally.
    # NOTE(review): if RunCommand raises, `result` is unbound when AfterRun
    # is invoked, producing a NameError that masks the original error.
    self.BeforeRun()
    try:
      result = self.RunCommand(self.GetCommand())
    finally:
      self.AfterRun(result)
    return result

  def Cleanup(self):
    return
class TestOutput(object):
  """Pairs a test case with the CommandOutput of one run of it."""

  def __init__(self, test, command, output, store_unexpected_output):
    self.test = test
    self.command = command
    self.output = output
    self.store_unexpected_output = store_unexpected_output

  def UnexpectedOutput(self):
    # True when the observed outcome is not among the test's allowed outcomes.
    if self.HasCrashed():
      outcome = CRASH
    elif self.HasTimedOut():
      outcome = TIMEOUT
    elif self.HasFailed():
      outcome = FAIL
    else:
      outcome = PASS
    return not outcome in self.test.outcomes

  def HasPreciousOutput(self):
    return self.UnexpectedOutput() and self.store_unexpected_output

  def HasCrashed(self):
    if utils.IsWindows():
      # NT status codes (0x8xxxxxxx) signal a crash, except values in the
      # 0x3FFFFF00 range used for regular failures.
      return 0x80000000 & self.output.exit_code and not (0x3FFFFF00 & self.output.exit_code)
    else:
      # Timed out tests will have exit_code -signal.SIGTERM.
      if self.output.timed_out:
        return False
      # Any other death-by-signal (negative exit code) except SIGABRT.
      return self.output.exit_code < 0 and \
             self.output.exit_code != -signal.SIGABRT

  def HasTimedOut(self):
    return self.output.timed_out;

  def HasFailed(self):
    # Negative tests invert the failure criterion.
    execution_failed = self.test.DidFail(self.output)
    if self.test.IsNegative():
      return not execution_failed
    else:
      return execution_failed
def KillProcessWithID(pid):
  """Terminate process `pid`: taskkill on Windows, SIGTERM elsewhere."""
  if not utils.IsWindows():
    os.kill(pid, signal.SIGTERM)
  else:
    os.popen('taskkill /T /F /PID %d' % pid)
# Polling parameters for RunProcess(): start with a very short sleep and
# back off geometrically, capped at MAX_SLEEP_TIME between exit-code checks.
MAX_SLEEP_TIME = 0.1
INITIAL_SLEEP_TIME = 0.0001
SLEEP_TIME_FACTOR = 1.25

# Sentinel meaning the Windows error mode could not be read/set.
SEM_INVALID_VALUE = -1
SEM_NOGPFAULTERRORBOX = 0x0002 # Microsoft Platform SDK WinBase.h
def Win32SetErrorMode(mode):
  """Set the Windows error mode via ctypes; returns the previous mode,
  or SEM_INVALID_VALUE when ctypes is unavailable."""
  try:
    import ctypes
  except ImportError:
    return SEM_INVALID_VALUE
  return ctypes.windll.kernel32.SetErrorMode(mode)
def RunProcess(context, timeout, args, **rest):
  """Spawn `args` and poll until it exits or `timeout` seconds pass.

  Returns (process, exit_code, timed_out); a timed-out process is killed
  first via KillProcessWithID().
  """
  if context.verbose: print "#", " ".join(args)
  popen_args = args
  prev_error_mode = SEM_INVALID_VALUE;
  if utils.IsWindows():
    if context.suppress_dialogs:
      # Try to change the error mode to avoid dialogs on fatal errors. Don't
      # touch any existing error mode flags by merging the existing error mode.
      # See http://blogs.msdn.com/oldnewthing/archive/2004/07/27/198410.aspx.
      error_mode = SEM_NOGPFAULTERRORBOX;
      prev_error_mode = Win32SetErrorMode(error_mode);
      Win32SetErrorMode(error_mode | prev_error_mode);
  process = subprocess.Popen(
    shell = utils.IsWindows(),
    args = popen_args,
    **rest
  )
  # Restore the original error mode now that the child inherited ours.
  if utils.IsWindows() and context.suppress_dialogs and prev_error_mode != SEM_INVALID_VALUE:
    Win32SetErrorMode(prev_error_mode)
  # Compute the end time - if the process crosses this limit we
  # consider it timed out.
  if timeout is None: end_time = None
  else: end_time = time.time() + timeout
  timed_out = False
  # Repeatedly check the exit code from the process in a
  # loop and keep track of whether or not it times out.
  exit_code = None
  sleep_time = INITIAL_SLEEP_TIME
  while exit_code is None:
    if (not end_time is None) and (time.time() >= end_time):
      # Kill the process and wait for it to exit.
      KillProcessWithID(process.pid)
      exit_code = process.wait()
      timed_out = True
    else:
      exit_code = process.poll()
      time.sleep(sleep_time)
      # Geometric back-off, capped at MAX_SLEEP_TIME.
      sleep_time = sleep_time * SLEEP_TIME_FACTOR
      if sleep_time > MAX_SLEEP_TIME:
        sleep_time = MAX_SLEEP_TIME
  return (process, exit_code, timed_out)
def PrintError(str):
  """Write `str` followed by a newline to stderr."""
  sys.stderr.write(str + '\n')
def CheckedUnlink(name):
  """Delete file `name`, reporting (not raising) any OSError."""
  try:
    os.unlink(name)
  except OSError, e:
    PrintError("os.unlink() " + str(e))
def Execute(args, context, timeout=None):
  """Run `args` capturing stdout/stderr into temp files; returns CommandOutput."""
  (fd_out, outname) = tempfile.mkstemp()
  (fd_err, errname) = tempfile.mkstemp()
  (process, exit_code, timed_out) = RunProcess(
    context,
    timeout,
    args = args,
    stdout = fd_out,
    stderr = fd_err,
  )
  os.close(fd_out)
  os.close(fd_err)
  # Read back and delete the capture files (py2 file() builtin).
  output = file(outname).read()
  errors = file(errname).read()
  CheckedUnlink(outname)
  CheckedUnlink(errname)
  return CommandOutput(exit_code, timed_out, output, errors)
def ExecuteNoCapture(args, context, timeout=None):
  """Run `args` without capturing output; stdout/stderr go to the console."""
  (process, exit_code, timed_out) = RunProcess(
    context,
    timeout,
    args = args,
  )
  # NOTE(review): `timed_out` is dropped here -- the returned CommandOutput
  # always reports timed_out=False; confirm callers before relying on it.
  return CommandOutput(exit_code, False, "", "")
def CarCdr(path):
  """Split a list into (head, tail); returns (None, []) for an empty list."""
  if not path:
    return (None, [ ])
  return (path[0], path[1:])
class TestConfiguration(object):
  """Per-suite configuration object loaded from a testcfg module."""

  def __init__(self, context, root):
    self.context = context
    self.root = root

  def Contains(self, path, file):
    # True when the pattern list `path` is a prefix-match of `file`
    # (each path element is a pattern with a .match() method).
    if len(path) > len(file):
      return False
    for i in xrange(len(path)):
      if not path[i].match(file[i]):
        return False
    return True

  def GetTestStatus(self, sections, defs):
    # Default: no status rules; subclasses override.
    pass
class TestSuite(object):
  """Base class for a named collection of tests."""

  def __init__(self, name):
    self.name = name

  def GetName(self):
    """Return the suite's name."""
    return self.name
# Extra flag sets: each test runs once per entry in this list.  E.g.:
# VARIANT_FLAGS = [[], ['--always_compact', '--noflush_code']]
VARIANT_FLAGS = [[]]
class TestRepository(TestSuite):
  """A test suite loaded lazily from a 'testcfg' module in a directory."""

  def __init__(self, path):
    normalized_path = abspath(path)
    super(TestRepository, self).__init__(basename(normalized_path))
    self.path = normalized_path
    self.is_loaded = False
    self.config = None

  def GetConfiguration(self, context):
    """Load (once) and return this directory's testcfg configuration."""
    if self.is_loaded:
      return self.config
    self.is_loaded = True
    file = None
    try:
      # imp.find_module returns an open file that must be closed by us.
      (file, pathname, description) = imp.find_module('testcfg', [ self.path ])
      module = imp.load_module('testcfg', file, pathname, description)
      self.config = module.GetConfiguration(context, self.path)
    finally:
      if file:
        file.close()
    return self.config

  def GetBuildRequirements(self, path, context):
    return self.GetConfiguration(context).GetBuildRequirements()

  def AddTestsToList(self, result, current_path, path, context, mode):
    # One copy of each listed test per variant flag set.
    for v in VARIANT_FLAGS:
      tests = self.GetConfiguration(context).ListTests(current_path, path, mode)
      for t in tests: t.variant_flags = v
      result += tests

  def GetTestStatus(self, context, sections, defs):
    self.GetConfiguration(context).GetTestStatus(sections, defs)
class LiteralTestSuite(TestSuite):
  """The root suite: aggregates an explicit list of sub-suites."""

  def __init__(self, tests):
    super(LiteralTestSuite, self).__init__('root')
    self.tests = tests

  def GetBuildRequirements(self, path, context):
    """Collect build requirements from sub-suites matching the path head."""
    (name, rest) = CarCdr(path)
    requirements = [ ]
    for suite in self.tests:
      if not name or name.match(suite.GetName()):
        requirements += suite.GetBuildRequirements(rest, context)
    return requirements

  def ListTests(self, current_path, path, context, mode):
    """List tests from sub-suites whose name matches the head of `path`."""
    (name, rest) = CarCdr(path)
    tests = [ ]
    for suite in self.tests:
      suite_name = suite.GetName()
      if not name or name.match(suite_name):
        suite.AddTestsToList(tests, current_path + [suite_name], path, context, mode)
    return tests

  def GetTestStatus(self, context, sections, defs):
    for suite in self.tests:
      suite.GetTestStatus(context, sections, defs)
# Binary-name suffix per build mode.
SUFFIX = {
  'debug'   : '_g',
  'release' : '' }
# Extra vm flags per build mode.
FLAGS = {
  'debug'   : ['--enable-slow-asserts', '--debug-code', '--verify-heap'],
  'release' : []}
# Timeouts are multiplied by this factor (debug builds are slower).
TIMEOUT_SCALEFACTOR = {
  'debug'   : 4,
  'release' : 1 }
class Context(object):
  """Shared execution settings: workspace paths, vm, timeouts, flags."""

  def __init__(self, workspace, buildspace, verbose, vm, timeout, processor, suppress_dialogs, store_unexpected_output):
    self.workspace = workspace
    self.buildspace = buildspace
    self.verbose = verbose
    self.vm_root = vm
    self.timeout = timeout
    self.processor = processor
    self.suppress_dialogs = suppress_dialogs
    self.store_unexpected_output = store_unexpected_output

  def GetVm(self, mode):
    """Path to the node binary for `mode` ('debug' uses the _g build)."""
    name = 'build/debug/node_g' if mode == 'debug' else 'build/default/node'
    if utils.IsWindows() and not name.endswith('.exe'):
      name = os.path.abspath(name + '.exe')
    return name

  def GetVmCommand(self, testcase, mode):
    """Full command line: vm binary followed by per-case and mode flags."""
    return [self.GetVm(mode)] + self.GetVmFlags(testcase, mode)

  def GetVmFlags(self, testcase, mode):
    return testcase.variant_flags + FLAGS[mode]

  def GetTimeout(self, mode):
    """Base timeout scaled up for slower (debug) builds."""
    return self.timeout * TIMEOUT_SCALEFACTOR[mode]
def RunTestCases(cases_to_run, progress, tasks):
  """Run `cases_to_run` on `tasks` threads using the named progress style."""
  indicator = PROGRESS_INDICATORS[progress](cases_to_run)
  return indicator.Run(tasks)
def BuildRequirements(context, requirements, mode, scons_flags):
  """Invoke scons to build `requirements`; True when the build succeeded."""
  command_line = ['scons', '-Y', context.workspace, 'mode=' + ",".join(mode)]
  command_line += requirements
  command_line += scons_flags
  output = ExecuteNoCapture(command_line, context)
  return output.exit_code == 0
# -------------------------------------------
# --- T e s t C o n f i g u r a t i o n ---
# -------------------------------------------
# Outcome names recognized in test-status files.
SKIP = 'skip'
FAIL = 'fail'
PASS = 'pass'
OKAY = 'okay'
TIMEOUT = 'timeout'
CRASH = 'crash'
SLOW = 'slow'
class Expression(object):
  """Base class for parsed status-file expressions."""
  pass


class Constant(Expression):
  """A literal boolean value."""

  def __init__(self, value):
    self.value = value

  def Evaluate(self, env, defs):
    return self.value


class Variable(Expression):
  """A $name reference resolved against the environment."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    if self.name not in env:
      return Nothing()
    return ListSet([env[self.name]])


class Outcome(Expression):
  """A bare identifier: a defined alias, else a literal outcome name."""

  def __init__(self, name):
    self.name = name

  def GetOutcomes(self, env, defs):
    if self.name in defs:
      return defs[self.name].GetOutcomes(env, defs)
    return ListSet([self.name])
class Set(object):
  """Abstract outcome-set algebra: intersection, union, emptiness."""
  pass


class ListSet(Set):
  """A finite outcome set backed by a plain list (order-preserving)."""

  def __init__(self, elms):
    self.elms = elms

  def __str__(self):
    return "ListSet%s" % str(self.elms)

  def Intersect(self, that):
    # Defer to the other operand when it is a special set (Everything/Nothing).
    if not isinstance(that, ListSet):
      return that.Intersect(self)
    return ListSet([ x for x in self.elms if x in that.elms ])

  def Union(self, that):
    if not isinstance(that, ListSet):
      return that.Union(self)
    return ListSet(self.elms + [ x for x in that.elms if x not in self.elms ])

  def IsEmpty(self):
    return not self.elms


class Everything(Set):
  """The universal set: identity for intersection, absorbs unions."""

  def Intersect(self, that):
    return that

  def Union(self, that):
    return self

  def IsEmpty(self):
    return False


class Nothing(Set):
  """The empty set: absorbs intersections, identity for unions."""

  def Intersect(self, that):
    return self

  def Union(self, that):
    return that

  def IsEmpty(self):
    return True
class Operation(Expression):
  """A binary expression node: logical ops evaluate to bool, set ops to outcome sets."""

  def __init__(self, left, op, right):
    self.left = left
    self.op = op
    self.right = right

  def Evaluate(self, env, defs):
    # Evaluate this node as a boolean condition.
    if self.op == '||' or self.op == ',':
      return self.left.Evaluate(env, defs) or self.right.Evaluate(env, defs)
    elif self.op == 'if':
      # A bare 'x if y' is never true as a standalone condition.
      return False
    elif self.op == '==':
      # Equality holds when the two outcome sets overlap.
      inter = self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
      return not inter.IsEmpty()
    else:
      assert self.op == '&&'
      return self.left.Evaluate(env, defs) and self.right.Evaluate(env, defs)

  def GetOutcomes(self, env, defs):
    # Evaluate this node as an outcome set.
    if self.op == '||' or self.op == ',':
      return self.left.GetOutcomes(env, defs).Union(self.right.GetOutcomes(env, defs))
    elif self.op == 'if':
      # Left-hand outcomes apply only when the guard condition holds.
      if self.right.Evaluate(env, defs): return self.left.GetOutcomes(env, defs)
      else: return Nothing()
    else:
      assert self.op == '&&'
      return self.left.GetOutcomes(env, defs).Intersect(self.right.GetOutcomes(env, defs))
def IsAlpha(str):
  """True when every character is a letter, digit or underscore
  (vacuously true for the empty string)."""
  return all(ch.isalpha() or ch.isdigit() or ch == '_' for ch in str)
class Tokenizer(object):
  """A simple string tokenizer that chops expressions into variables,
  parens and operators"""

  def __init__(self, expr):
    self.index = 0
    self.expr = expr
    self.length = len(expr)
    self.tokens = None

  def Current(self, length = 1):
    # Peek at the next `length` characters without consuming them.
    if not self.HasMore(length): return ""
    return self.expr[self.index:self.index+length]

  def HasMore(self, length = 1):
    # NOTE(review): for length > 1 this can report True with fewer than
    # `length` characters remaining; Current() then returns a shorter
    # slice, which simply fails the two-char comparisons in Tokenize().
    return self.index < self.length + (length - 1)

  def Advance(self, count = 1):
    self.index = self.index + count

  def AddToken(self, token):
    self.tokens.append(token)

  def SkipSpaces(self):
    while self.HasMore() and self.Current().isspace():
      self.Advance()

  def Tokenize(self):
    """Split the expression into a token list; returns None on malformed
    input.  NOTE(review): trailing whitespace also yields None here."""
    self.tokens = [ ]
    while self.HasMore():
      self.SkipSpaces()
      if not self.HasMore():
        return None
      if self.Current() == '(':
        self.AddToken('(')
        self.Advance()
      elif self.Current() == ')':
        self.AddToken(')')
        self.Advance()
      elif self.Current() == '$':
        self.AddToken('$')
        self.Advance()
      elif self.Current() == ',':
        self.AddToken(',')
        self.Advance()
      elif IsAlpha(self.Current()):
        # Consume a whole identifier/outcome word.
        buf = ""
        while self.HasMore() and IsAlpha(self.Current()):
          buf += self.Current()
          self.Advance()
        self.AddToken(buf)
      elif self.Current(2) == '&&':
        self.AddToken('&&')
        self.Advance(2)
      elif self.Current(2) == '||':
        self.AddToken('||')
        self.Advance(2)
      elif self.Current(2) == '==':
        self.AddToken('==')
        self.Advance(2)
      else:
        return None
    return self.tokens
class Scanner(object):
  """A simple scanner that can serve out tokens from a given list"""

  def __init__(self, tokens):
    self.tokens = tokens
    self.length = len(tokens)
    self.index = 0

  def HasMore(self):
    """True while unconsumed tokens remain."""
    return self.index < self.length

  def Current(self):
    """Return the current token without consuming it."""
    return self.tokens[self.index]

  def Advance(self):
    """Consume the current token."""
    self.index += 1
def ParseAtomicExpression(scan):
  """Parse one atom: true/false literal, outcome word, $variable, or a
  parenthesized sub-expression.  Returns None on malformed input."""
  if scan.Current() == "true":
    scan.Advance()
    return Constant(True)
  elif scan.Current() == "false":
    scan.Advance()
    return Constant(False)
  elif IsAlpha(scan.Current()):
    # Bare word: an outcome (or alias), stored lower-case.
    name = scan.Current()
    scan.Advance()
    return Outcome(name.lower())
  elif scan.Current() == '$':
    # '$' must be followed by an identifier naming a variable.
    scan.Advance()
    if not IsAlpha(scan.Current()):
      return None
    name = scan.Current()
    scan.Advance()
    return Variable(name.lower())
  elif scan.Current() == '(':
    scan.Advance()
    result = ParseLogicalExpression(scan)
    if (not result) or (scan.Current() != ')'):
      return None
    scan.Advance()
    return result
  else:
    return None
# Binary comparison operators handled by ParseOperatorExpression.
BINARIES = ['==']
def ParseOperatorExpression(scan):
  """Parse `atomic (== operator-expr)*`; returns None on malformed input."""
  left = ParseAtomicExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() in BINARIES):
    op = scan.Current()
    scan.Advance()
    # Right operand parsed recursively (right-associative).
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left = Operation(left, op, right)
  return left
def ParseConditionalExpression(scan):
  # conditional-expr := operator-expr ('if' operator-expr)*
  left = ParseOperatorExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() == 'if'):
    scan.Advance()
    right = ParseOperatorExpression(scan)
    if not right:
      return None
    left= Operation(left, 'if', right)
  return left
# Logical connectives; ',' is treated the same as '&&'/'||' here.
LOGICALS = ["&&", "||", ","]
def ParseLogicalExpression(scan):
  # logical-expr := conditional-expr (('&&'|'||'|',') conditional-expr)*
  left = ParseConditionalExpression(scan)
  if not left: return None
  while scan.HasMore() and (scan.Current() in LOGICALS):
    op = scan.Current()
    scan.Advance()
    right = ParseConditionalExpression(scan)
    if not right:
      return None
    left = Operation(left, op, right)
  return left
def ParseCondition(expr):
  """Parses a logical expression into an Expression object"""
  # Lex first; Tokenize returns None on any lex error.
  tokens = Tokenizer(expr).Tokenize()
  if not tokens:
    print "Malformed expression: '%s'" % expr
    return None
  scan = Scanner(tokens)
  ast = ParseLogicalExpression(scan)
  if not ast:
    print "Malformed expression: '%s'" % expr
    return None
  # Leftover tokens mean the expression did not parse as a whole.
  if scan.HasMore():
    print "Malformed expression: '%s'" % expr
    return None
  return ast
class ClassifiedTest(object):
  """A test case paired with the set of outcomes the configuration
  allows for it."""
  def __init__(self, case, outcomes):
    self.case = case
    self.outcomes = outcomes
class Configuration(object):
  """The parsed contents of a configuration file"""
  def __init__(self, sections, defs):
    self.sections = sections
    self.defs = defs
  def ClassifyTests(self, cases, env):
    # Compute each case's allowed outcomes from the sections whose
    # condition holds under `env`.  Returns a tuple of
    # (classified tests, rules that matched no case, union of outcomes).
    sections = [s for s in self.sections if s.condition.Evaluate(env, self.defs)]
    # NOTE: bare `reduce` is a Python 2 builtin (functools.reduce on Python 3).
    all_rules = reduce(list.__add__, [s.rules for s in sections], [])
    unused_rules = set(all_rules)
    result = [ ]
    all_outcomes = set([])
    for case in cases:
      matches = [ r for r in all_rules if r.Contains(case.path) ]
      outcomes = set([])
      for rule in matches:
        outcomes = outcomes.union(rule.GetOutcomes(env, self.defs))
        unused_rules.discard(rule)
      if not outcomes:
        # Default when no rule matches; note this is a list, not a set.
        outcomes = [PASS]
      case.outcomes = outcomes
      all_outcomes = all_outcomes.union(outcomes)
      result.append(ClassifiedTest(case, outcomes))
    return (result, list(unused_rules), all_outcomes)
class Section(object):
  """A section of the configuration file. Sections are enabled or
  disabled prior to running the tests, based on their conditions"""
  def __init__(self, condition):
    self.condition = condition
    self.rules = [ ]
  def AddRule(self, rule):
    """Attach one Rule to this section."""
    self.rules.append(rule)
class Rule(object):
  """A single rule that specifies the expected outcome for a single
  test."""
  def __init__(self, raw_path, path, value):
    self.raw_path = raw_path
    self.path = path    # list of pattern objects with a .match(str) method
    self.value = value  # expression evaluating to the outcome set
  def GetOutcomes(self, env, defs):
    """Evaluate this rule's expression to a list of outcome names."""
    # Fixed idiom: the local was named `set`, shadowing the builtin.
    outcomes = self.value.GetOutcomes(env, defs)
    assert isinstance(outcomes, ListSet)
    return outcomes.elms
  def Contains(self, path):
    """True if this rule's pattern path matches the leading components
    of `path` (a shorter rule path acts as a prefix match)."""
    if len(self.path) > len(path):
      return False
    # zip pairs each pattern with its component (range/xrange not needed).
    for pattern, component in zip(self.path, path):
      if not pattern.match(component):
        return False
    return True
# Line formats recognized in a configuration/status file:
HEADER_PATTERN = re.compile(r'\[([^]]+)\]')  # "[ condition ]" section header
RULE_PATTERN = re.compile(r'\s*([^: ]*)\s*:(.*)')  # "test/path : outcomes"
DEF_PATTERN = re.compile(r'^def\s*(\w+)\s*=(.*)$')  # "def name = expression"
PREFIX_PATTERN = re.compile(r'^\s*prefix\s+([\w\_\.\-\/]+)$')  # "prefix path"
def ReadConfigurationInto(path, sections, defs):
  """Parse the status file at `path`, appending Section objects to
  `sections` and named definitions into `defs`.  Returns False on the
  first malformed line, True otherwise."""
  current_section = Section(Constant(True))
  sections.append(current_section)
  prefix = []
  for line in utils.ReadLinesFrom(path):
    # "[ condition ]" opens a new section.
    header_match = HEADER_PATTERN.match(line)
    if header_match:
      condition_str = header_match.group(1).strip()
      condition = ParseCondition(condition_str)
      # NOTE(review): a malformed header condition yields None here and is
      # not rejected — confirm whether that should be an error.
      new_section = Section(condition)
      sections.append(new_section)
      current_section = new_section
      continue
    # "path/to/test : OUTCOMES" adds a rule to the current section.
    rule_match = RULE_PATTERN.match(line)
    if rule_match:
      path = prefix + SplitPath(rule_match.group(1).strip())
      value_str = rule_match.group(2).strip()
      value = ParseCondition(value_str)
      if not value:
        return False
      current_section.AddRule(Rule(rule_match.group(1), path, value))
      continue
    # "def name = expression" defines a reusable expression.
    def_match = DEF_PATTERN.match(line)
    if def_match:
      name = def_match.group(1).lower()
      value = ParseCondition(def_match.group(2).strip())
      if not value:
        return False
      defs[name] = value
      continue
    # "prefix path" is prepended to subsequent rule paths.
    prefix_match = PREFIX_PATTERN.match(line)
    if prefix_match:
      prefix = SplitPath(prefix_match.group(1).strip())
      continue
    print "Malformed line: '%s'." % line
    return False
  return True
# ---------------
# --- M a i n ---
# ---------------
# Architecture guessed from the host; used when --arch is not given.
ARCH_GUESS = utils.GuessArchitecture()
def BuildOptions():
  """Build and return the optparse.OptionParser for the test runner's
  command line."""
  result = optparse.OptionParser()
  result.add_option("-m", "--mode", help="The test modes in which to run (comma-separated)",
      default='release')
  result.add_option("-v", "--verbose", help="Verbose output",
      default=False, action="store_true")
  result.add_option("-S", dest="scons_flags", help="Flag to pass through to scons",
      default=[], action="append")
  result.add_option("-p", "--progress",
      help="The style of progress indicator (verbose, dots, color, mono)",
      choices=PROGRESS_INDICATORS.keys(), default="mono")
  # NOTE(review): default=True with action="store_true" means no_build is
  # always True, so the build step in Main() never runs — confirm this is
  # intentional and not a leftover from flipping the default.
  result.add_option("--no-build", help="Don't build requirements",
      default=True, action="store_true")
  result.add_option("--build-only", help="Only build requirements, don't run the tests",
      default=False, action="store_true")
  result.add_option("--report", help="Print a summary of the tests to be run",
      default=False, action="store_true")
  result.add_option("-s", "--suite", help="A test suite",
      default=[], action="append")
  result.add_option("-t", "--timeout", help="Timeout in seconds",
      default=60, type="int")
  result.add_option("--arch", help='The architecture to run tests for',
      default='none')
  result.add_option("--snapshot", help="Run the tests with snapshot turned on",
      default=False, action="store_true")
  result.add_option("--simulator", help="Run tests with architecture simulator",
      default='none')
  result.add_option("--special-command", default=None)
  result.add_option("--valgrind", help="Run tests through valgrind",
      default=False, action="store_true")
  result.add_option("--cat", help="Print the source of the tests",
      default=False, action="store_true")
  result.add_option("--warn-unused", help="Report unused rules",
      default=False, action="store_true")
  result.add_option("-j", help="The number of parallel tasks to run",
      default=1, type="int")
  result.add_option("--time", help="Print timing information after running",
      default=False, action="store_true")
  # The two suppress-dialogs options share one dest; the "no-" variant
  # overrides the default.
  result.add_option("--suppress-dialogs", help="Suppress Windows dialogs for crashing tests",
      dest="suppress_dialogs", default=True, action="store_true")
  result.add_option("--no-suppress-dialogs", help="Display Windows dialogs for crashing tests",
      dest="suppress_dialogs", action="store_false")
  result.add_option("--shell", help="Path to V8 shell", default="shell")
  result.add_option("--store-unexpected-output",
      help="Store the temporary JS files from tests that fails",
      dest="store_unexpected_output", default=True, action="store_true")
  result.add_option("--no-store-unexpected-output",
      help="Deletes the temporary JS files from tests that fails",
      dest="store_unexpected_output", action="store_false")
  return result
def ProcessOptions(options):
  """Normalize and validate parsed options in place; returns False on an
  invalid combination (unknown mode, arch/simulator mismatch)."""
  global VERBOSE
  VERBOSE = options.verbose
  # --mode is comma-separated; turn it into a list.
  options.mode = options.mode.split(',')
  for mode in options.mode:
    if not mode in ['debug', 'release']:
      print "Unknown mode %s" % mode
      return False
  if options.simulator != 'none':
    # Simulator argument was set. Make sure arch and simulator agree.
    if options.simulator != options.arch:
      if options.arch == 'none':
        options.arch = options.simulator
      else:
        print "Architecture %s does not match sim %s" %(options.arch, options.simulator)
        return False
    # Ensure that the simulator argument is handed down to scons.
    options.scons_flags.append("simulator=" + options.simulator)
  else:
    # If options.arch is not set by the command line and no simulator setting
    # was found, set the arch to the guess.
    if options.arch == 'none':
      options.arch = ARCH_GUESS
    options.scons_flags.append("arch=" + options.arch)
  if options.snapshot:
    options.scons_flags.append("snapshot=on")
  return True
REPORT_TEMPLATE = """\
Total: %(total)i tests
* %(skipped)4d tests will be skipped
* %(nocrash)4d tests are expected to be flaky but not crash
* %(pass)4d tests are expected to pass
* %(fail_ok)4d tests are expected to fail that we won't fix
* %(fail)4d tests are expected to fail that we should fix\
"""
def PrintReport(cases):
  """Print a summary of how many tests fall into each outcome class."""
  def IsFlaky(o):
    # Expected to both pass and fail sometimes, but never crash.
    return (PASS in o) and (FAIL in o) and (not CRASH in o) and (not OKAY in o)
  def IsFailOk(o):
    return (len(o) == 2) and (FAIL in o) and (OKAY in o)
  unskipped = [c for c in cases if not SKIP in c.outcomes]
  print REPORT_TEMPLATE % {
    'total': len(cases),
    'skipped': len(cases) - len(unskipped),
    'nocrash': len([t for t in unskipped if IsFlaky(t.outcomes)]),
    'pass': len([t for t in unskipped if list(t.outcomes) == [PASS]]),
    'fail_ok': len([t for t in unskipped if IsFailOk(t.outcomes)]),
    'fail': len([t for t in unskipped if list(t.outcomes) == [FAIL]])
  }
class Pattern(object):
  """A glob-style pattern where '*' matches any substring; lazily compiled
  to an anchored regular expression on first use."""
  def __init__(self, pattern):
    self.pattern = pattern
    self.compiled = None
  def match(self, str):
    """Match `str` against the whole pattern; returns a match object or None."""
    if self.compiled is None:
      regex = "^%s$" % self.pattern.replace('*', '.*')
      self.compiled = re.compile(regex)
    return self.compiled.match(str)
  def __str__(self):
    return self.pattern
def SplitPath(s):
  """Split 'a/b/c' into a list of Pattern objects, stripping whitespace
  and dropping empty components."""
  components = [piece.strip() for piece in s.split('/')]
  return [Pattern(component) for component in components if component]
def GetSpecialCommandProcessor(value):
  """Return a function mapping a command (list of args) to the command to
  run.  `value` has the form "prefix@suffix" where '@' stands for the
  original command; without an '@', commands pass through unchanged."""
  if (not value) or (value.find('@') == -1):
    def ExpandCommand(args):
      return args
    return ExpandCommand
  else:
    pos = value.find('@')
    import urllib
    # NOTE: urllib.unquote is Python 2 only (urllib.parse.unquote on Python 3).
    prefix = urllib.unquote(value[:pos]).split()
    suffix = urllib.unquote(value[pos+1:]).split()
    def ExpandCommand(args):
      return prefix + args + suffix
    return ExpandCommand
# Suites run when no test paths are given on the command line.
BUILT_IN_TESTS = ['simple', 'pummel', 'message', 'internet']
def GetSuites(test_root):
  # A suite is any directory under test_root that contains a testcfg.py.
  def IsSuite(path):
    return isdir(path) and exists(join(path, 'testcfg.py'))
  return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
def FormatTime(d):
  """Format a duration `d` in seconds as "MM:SS.mmm"."""
  fractional_ms = round(d * 1000) % 1000
  prefix = time.strftime("%M:%S.", time.gmtime(d))
  return prefix + ("%03i" % fractional_ms)
def Main():
  """Entry point: parse options, discover test suites, classify tests
  against the status configuration, and run them.  Returns the process
  exit code (0 on success, 1 on failure)."""
  parser = BuildOptions()
  (options, args) = parser.parse_args()
  if not ProcessOptions(options):
    parser.print_help()
    return 1
  # The workspace is the parent of the directory containing this script.
  workspace = abspath(join(dirname(sys.argv[0]), '..'))
  suites = GetSuites(join(workspace, 'test'))
  repositories = [TestRepository(join(workspace, 'test', name)) for name in suites]
  repositories += [TestRepository(a) for a in options.suite]
  root = LiteralTestSuite(repositories)
  if len(args) == 0:
    paths = [SplitPath(t) for t in BUILT_IN_TESTS]
  else:
    paths = [ ]
    for arg in args:
      path = SplitPath(arg)
      paths.append(path)
  # Check for --valgrind option. If enabled, we overwrite the special
  # command flag with a command that uses the run-valgrind.py script.
  if options.valgrind:
    run_valgrind = join(workspace, "tools", "run-valgrind.py")
    options.special_command = "python -u " + run_valgrind + " @"
  shell = abspath(options.shell)
  buildspace = dirname(shell)
  context = Context(workspace, buildspace, VERBOSE,
                    shell,
                    options.timeout,
                    GetSpecialCommandProcessor(options.special_command),
                    options.suppress_dialogs,
                    options.store_unexpected_output)
  # First build the required targets
  if not options.no_build:
    reqs = [ ]
    for path in paths:
      reqs += root.GetBuildRequirements(path, context)
    reqs = list(set(reqs))
    if len(reqs) > 0:
      if options.j != 1:
        options.scons_flags += ['-j', str(options.j)]
      if not BuildRequirements(context, reqs, options.mode, options.scons_flags):
        return 1
  # Just return if we are only building the targets for running the tests.
  if options.build_only:
    return 0
  # Get status for tests
  sections = [ ]
  defs = { }
  root.GetTestStatus(context, sections, defs)
  config = Configuration(sections, defs)
  # List the tests
  all_cases = [ ]
  all_unused = [ ]
  unclassified_tests = [ ]
  globally_unused_rules = None
  for path in paths:
    for mode in options.mode:
      if not exists(context.GetVm(mode)):
        print "Can't find shell executable: '%s'" % context.GetVm(mode)
        continue
      env = {
        'mode': mode,
        'system': utils.GuessOS(),
        'arch': options.arch,
        'simulator': options.simulator
      }
      test_list = root.ListTests([], path, context, mode)
      unclassified_tests += test_list
      (cases, unused_rules, all_outcomes) = config.ClassifyTests(test_list, env)
      # A rule counts as globally unused only if every path/mode left it unused.
      if globally_unused_rules is None:
        globally_unused_rules = set(unused_rules)
      else:
        globally_unused_rules = globally_unused_rules.intersection(unused_rules)
      all_cases += cases
      all_unused.append(unused_rules)
  if options.cat:
    # --cat: dump each test's source once and exit without running anything.
    visited = set()
    for test in unclassified_tests:
      key = tuple(test.path)
      if key in visited:
        continue
      visited.add(key)
      print "--- begin source: %s ---" % test.GetLabel()
      source = test.GetSource().strip()
      print source
      print "--- end source: %s ---" % test.GetLabel()
    return 0
  if options.warn_unused:
    for rule in globally_unused_rules:
      print "Rule for '%s' was not used." % '/'.join([str(s) for s in rule.path])
  if options.report:
    PrintReport(all_cases)
  result = None
  def DoSkip(case):
    return SKIP in case.outcomes or SLOW in case.outcomes
  cases_to_run = [ c for c in all_cases if not DoSkip(c) ]
  if len(cases_to_run) == 0:
    print "No tests to run."
    return 0
  else:
    try:
      start = time.time()
      if RunTestCases(cases_to_run, options.progress, options.j):
        result = 0
      else:
        result = 1
      duration = time.time() - start
    except KeyboardInterrupt:
      print "Interrupted"
      return 1
  if options.time:
    # Write the times to stderr to make it easy to separate from the
    # test output.
    print
    sys.stderr.write("--- Total time: %s ---\n" % FormatTime(duration))
    timed_tests = [ t.case for t in cases_to_run if not t.case.duration is None ]
    timed_tests.sort(lambda a, b: a.CompareTime(b))
    index = 1
    # Report the 20 slowest tests.
    for entry in timed_tests[:20]:
      t = FormatTime(entry.duration)
      sys.stderr.write("%4i (%s) %s\n" % (index, t, entry.GetLabel()))
      index += 1
  return result
# Script entry point.
if __name__ == '__main__':
  sys.exit(Main())
|
tcp_client.py | #!/usr/bin/env python
import sys
import logging
import socket
import struct
from threading import Event, Thread
from util import *
logger = logging.getLogger('client')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
STOP = Event()
def accept(port):
    """Listen on `port` and accept incoming connections until STOP is set."""
    logger.info("accept %s", port)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # REUSEADDR/REUSEPORT let multiple sockets share the port, which the
    # connect() workers also bind from (TCP hole-punching attempt).
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    s.bind(('', port))
    s.listen(1)
    # Time out accept() every 5s so the STOP flag is re-checked.
    s.settimeout(5)
    while not STOP.is_set():
        try:
            conn, addr = s.accept()
        except socket.timeout:
            continue
        else:
            logger.info("Accept %s connected!", port)
            # STOP.set()
    # NOTE(review): accepted connections (`conn`) and the listening socket
    # are never closed or used further here — confirm whether they should
    # be handed off or cleaned up.
def connect(local_addr, addr):
    """Repeatedly try to connect from `local_addr` to `addr` until STOP is
    set (the hole-punching "active" side)."""
    logger.info("connect from %s to %s", local_addr, addr)
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    # Bind to a fixed local address so the NAT mapping stays consistent.
    s.bind(local_addr)
    while not STOP.is_set():
        try:
            s.connect(addr)
        except socket.error:
            # NOTE(review): retrying connect() on the same socket after a
            # failure is platform-dependent — confirm this works on the
            # targeted OSes.
            continue
        # except Exception as exc:
        #     logger.exception("unexpected exception encountered")
        #     break
        else:
            logger.info("connected from %s to %s success!", local_addr, addr)
            # STOP.set()
def main(host='54.187.46.146', port=5005):
    """Rendezvous with the server at (host, port), exchange public/private
    addresses with a peer, then attempt TCP hole punching from four
    threads: two listeners and two connectors (toward the peer's public
    and private addresses)."""
    sa = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sa.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    sa.connect((host, port))
    priv_addr = sa.getsockname()
    send_msg(sa, addr_to_msg(priv_addr))
    data = recv_msg(sa)
    logger.info("client %s %s - received data: %s", priv_addr[0], priv_addr[1], data)
    pub_addr = msg_to_addr(data)
    send_msg(sa, addr_to_msg(pub_addr))
    data = recv_msg(sa)
    # The server sends the peer's public and private addresses joined by '|'.
    pubdata, privdata = data.split(b'|')
    client_pub_addr = msg_to_addr(pubdata)
    client_priv_addr = msg_to_addr(privdata)
    logger.info(
        "client public is %s and private is %s, peer public is %s private is %s",
        pub_addr, priv_addr, client_pub_addr, client_priv_addr,
    )
    threads = {
        '0_accept': Thread(target=accept, args=(priv_addr[1],)),
        '1_accept': Thread(target=accept, args=(client_pub_addr[1],)),
        '2_connect': Thread(target=connect, args=(priv_addr, client_pub_addr,)),
        '3_connect': Thread(target=connect, args=(priv_addr, client_priv_addr,)),
    }
    for name in sorted(threads.keys()):
        logger.info('start thread %s', name)
        threads[name].start()
    # Reap threads as they finish.  Fixed: Thread.join(timeout) never raises
    # TimeoutError (it simply returns), so the old try/except around it was
    # dead code; liveness is what is_alive() checks.
    while threads:
        for name in list(threads.keys()):
            threads[name].join(1)
            if not threads[name].is_alive():
                threads.pop(name)
if __name__ == '__main__':
    # Fixed: logging.basicConfig() has no `message` keyword — on Python 3 it
    # raises "ValueError: Unrecognised argument(s): message" at startup.
    # The format string belongs to the `format` parameter.
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(message)s')
    main(*addr_from_args(sys.argv))
|
test_basic.py | import asyncio
from mpdispatcher import MpDispatcher, Closed
from multiprocessing import Process
import multiprocessing as mp
import pytest
import sys
from time import sleep
@pytest.fixture()
def dispatcher():
    """Provide a fresh MpDispatcher for each test."""
    return MpDispatcher()
def receive_single_event(receiver):
    """Child-process body: handle exactly one "cb" event carrying 54."""
    l = []
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    receiver.handle_next(timeout=2)
    # Failing asserts make the child exit non-zero, which the parent checks.
    assert l == [54]
def test_handle_next_in_child_proc_with_timeout(dispatcher):
    """An event fired by the parent is handled by handle_next in a child."""
    proc = Process(target=receive_single_event, args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 54)
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_three_events_expecting_close(receiver):
    """Child-process body: handle two events, then expect Closed.

    NOTE(review): despite the name, only one event is fired by the parent
    test; the second handle_next apparently consumes the close — confirm
    against MpDispatcher semantics.
    """
    l = []
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    receiver.handle_next(timeout=2)
    receiver.handle_next(timeout=2)
    with pytest.raises(Closed):
        receiver.handle_next(timeout=2)
def test_handle_next_on_closed(dispatcher):
    """handle_next raises Closed once the sender has closed the dispatcher."""
    proc = Process(target=receive_three_events_expecting_close,
                   args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.close()
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def send_single_event(sender):
    """Child-process body: fire one "cb" event into the dispatcher."""
    sender.fire("cb", 54)
def test_handle_next_in_parent_proc_with_timeout(dispatcher):
    """An event fired from a child is handled by handle_next in the parent."""
    proc = Process(target=send_single_event, args=[dispatcher.sender])
    l = []
    def cb(arg):
        l.append(arg)
    dispatcher.receiver.connect("cb", cb)
    proc.start()
    dispatcher.receiver.handle_next(timeout=2)
    proc.join(timeout=2)
    assert l == [54]
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_events_until_closed(receiver):
    """Child-process body: handle all events until the sender closes;
    events with no connected handler must be ignored."""
    l = []
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    receiver.handle_until_closed()
    assert l == [43, 54, 87]
def test_handle_until_closed_in_child_proc(dispatcher):
    """handle_until_closed drains all fired events (dropping ones without a
    handler) and returns after close()."""
    proc = Process(target=receive_events_until_closed,
                   args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 43)
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.fire("cb", 87)
    # No handler is connected for this event; it must be silently dropped.
    dispatcher.sender.fire("some_nonexistent_event", 100)
    dispatcher.sender.close()
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_events_until_87(receiver):
    """Child-process body: handle events until the handler itself calls
    receiver.close() (on seeing 87)."""
    l = []
    def cb(arg):
        l.append(arg)
        if arg == 87:
            receiver.close()
    receiver.connect("cb", cb)
    receiver.handle_until_closed()
    assert l == [43, 54, 87]
def test_handle_until_closed_in_child_proc_closing_itself(dispatcher):
    """The receiver side can close the dispatcher from within a handler."""
    proc = Process(target=receive_events_until_87,
                   args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 43)
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.fire("cb", 87)
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_events_until_blocking(receiver, aux_in, aux_out):
    """Child-process body: wait until the parent confirms three events were
    sent, drain them with handle_until_blocking, then report back."""
    l = []
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    # Wait until the parent has fired all three events before draining.
    assert aux_in.get(timeout=5) == "sent_3"
    receiver.handle_until_blocking()
    aux_out.put("blocks")
    assert l == [43, 54, 87]
def test_handle_until_blocking(dispatcher):
    """handle_until_blocking drains the queued events and returns once the
    queue would block; events fired afterwards are never handled."""
    aux_to_child, aux_from_child = mp.Queue(), mp.Queue()
    proc = Process(target=receive_events_until_blocking,
                   args=[dispatcher.receiver, aux_to_child, aux_from_child])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 43)
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.fire("cb", 87)
    aux_to_child.put("sent_3")
    # Fixed: `aux_from_child.get("blocks")` passed "blocks" as the `block`
    # flag of Queue.get and only asserted the returned value was truthy; we
    # must wait (bounded) for the child's sentinel and compare it.
    assert aux_from_child.get(timeout=5) == "blocks"
    dispatcher.sender.fire("cb", 100)
    dispatcher.sender.close()
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_events_and_run_concurrent_coro(receiver):
    """Child-process body: run the coroutine-based handler loop alongside an
    unrelated coroutine and check both make progress."""
    l = []
    l2 = []
    async def some_concurrent_coro():
        for i in range(3):
            l2.append(i**2)
            await asyncio.sleep(0.1)
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    async def asyncio_main(receiver):
        # Run the independent coroutine and the dispatcher loop concurrently.
        await asyncio.wait([
            asyncio.create_task(x) for x in [
                some_concurrent_coro(),
                receiver.coro_handle_until_closed()
            ]
        ])
    asyncio.run(asyncio_main(receiver))
    assert l == [43, 54, 87]
    assert l2 == [0, 1, 4]
@pytest.mark.skipif(sys.version_info < (3, 7),
                    reason="requires python3.7 or higher")
def test_coro_handle_until_closed_in_child_proc(dispatcher):
    """coro_handle_until_closed cooperates with other coroutines in a child
    process and returns after the sender closes."""
    proc = Process(target=receive_events_and_run_concurrent_coro,
                   args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 43)
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.fire("cb", 87)
    dispatcher.sender.close()
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
def receive_events_threaded_and_run_parallel_coro(receiver):
    """Child-process body: drive the dispatcher from a background thread that
    marshals handler calls back into the asyncio loop."""
    # note: asyncio here plays the role of any other event loop, as seen in
    # various GUI toolkits etc., so it's entirely incidental that this uses
    # coroutines instead of just synchronous functions with threads in the
    # background
    l = []
    l2 = []
    async def some_stoppable_coro(should_stop):
        await should_stop.wait()
        await asyncio.sleep(0.1)
        l2.extend([0,1,4])
    def cb(arg):
        l.append(arg)
    receiver.connect("cb", cb)
    async def asyncio_main(receiver):
        should_stop = asyncio.Event()
        def stop_coro():
            should_stop.set()
        receiver.connect("stop_coro", stop_coro)
        loop = asyncio.get_running_loop()
        # Handlers are invoked on the loop thread via call_soon_threadsafe.
        receiver.threaded_handle_until_closed(call_via=loop.call_soon_threadsafe)
        await some_stoppable_coro(should_stop)
    asyncio.run(asyncio_main(receiver))
    assert l == [43, 54, 87]
    assert l2 == [0, 1, 4]
@pytest.mark.skipif(sys.version_info < (3, 7),
                    reason="requires python3.7 or higher")
def test_threaded_handle_until_closed_in_child_proc(dispatcher):
    """threaded_handle_until_closed delivers events into a running asyncio
    loop in a child process and winds down after close()."""
    proc = Process(target=receive_events_threaded_and_run_parallel_coro,
                   args=[dispatcher.receiver])
    proc.daemon = True
    proc.start()
    dispatcher.sender.fire("cb", 43)
    dispatcher.sender.fire("cb", 54)
    dispatcher.sender.fire("cb", 87)
    dispatcher.sender.fire("stop_coro")
    dispatcher.sender.close()
    proc.join(timeout=2)
    assert not proc.is_alive()
    assert proc.exitcode == 0
|
factory.py | __all__ = [
'KeyServerFactory',
'BasicKeyServer'
]
import copy
import csv
import json
import os
import sys
import types
import warnings
from collections import OrderedDict
from contextlib import ExitStack
import math
import numpy as np
import pandas as pd
from ..utils.data import get_json, get_location_df
from ..utils.exceptions import OasisException
from ..utils.log import oasis_log
from ..utils.path import import_from_string, get_custom_module, as_path
from ..utils.status import OASIS_KEYS_STATUS
from .builtin import DeterministicLookup
from .builtin import Lookup as NewLookup
from multiprocessing import cpu_count, Queue, Process
from queue import Empty, Full
# add pickling support for traceback object
import tblib.pickling_support
tblib.pickling_support.install()
def with_error_queue(fct):
    """Decorator for multiprocessing worker functions.

    Runs `fct`, and if it raises, puts the exc_info tuple onto the error
    queue that must be the worker's first positional argument (the
    traceback is picklable thanks to tblib.pickling_support).
    Fixed: the wrapper now carries functools.wraps so the decorated
    worker keeps its original __name__/__doc__ (useful in logs and on
    some multiprocessing start methods).
    """
    import functools

    @functools.wraps(fct)
    def wrapped_fct(error_queue, *args, **kwargs):
        try:
            return fct(error_queue, *args, **kwargs)
        except Exception:
            error_queue.put(sys.exc_info())
    return wrapped_fct
class KeyServerFactory(object):
    """
    A factory class to create the Keys Server that will be used to generate the keys files.
    All Key Servers must implement the interface defined in lookup.interface.KeyServerInterface.
    Oasis provides a built-in Key Server that manages the generation of the key files from the keys provided by
    a built-in or a custom Key Lookup.
    The factory now returns a KeyServer object and not a KeyLookup.
    The parameters to pass have also been simplified;
    usage of all the below parameters is now deprecated:
    - complex_lookup_config_fp => pass the path to your complex lookup config directly in lookup_config_fg
    - lookup_module_path => set as key 'lookup_module_path' in the lookup config
    - model_keys_data_path => set as key 'keys_data_path' in the lookup config
    - model_version_file_path => set the model information ('supplier_id', 'model_id', 'model_version') directly
      into the config
    """
    @classmethod
    def get_config(cls, config_fp):
        # Returns (directory containing the config file, parsed JSON dict).
        return as_path(os.path.dirname(config_fp), 'config_fp'), get_json(config_fp)
    @classmethod
    def get_model_info(cls, model_version_file_path):
        """
        Get model information from the model version file.
        """
        model_version_file_path = as_path(model_version_file_path, 'model_version_file_path', preexists=True, null_is_valid=False)
        with open(model_version_file_path, 'r', encoding='utf-8') as f:
            # The version file is a single headerless CSV row.
            return next(csv.DictReader(
                f, fieldnames=['supplier_id', 'model_id', 'model_version']
            ))
    @classmethod
    def update_deprecated_args(cls, config_dir, config,
                               complex_lookup_config_fp, model_keys_data_path, model_version_file_path, lookup_module_path):
        """Fold the deprecated keyword arguments into the lookup config,
        warning when any of them is used."""
        if (complex_lookup_config_fp
                or model_keys_data_path
                or model_version_file_path
                or lookup_module_path):
            warnings.warn('usage of complex_lookup_config_fp, model_keys_data_path, '
                          'model_version_file_path and lookup_module_path is now deprecated'
                          'those variables now need to be set in lookup config see (key server documentation)')
        if complex_lookup_config_fp:
            config_dir, config = cls.get_config(complex_lookup_config_fp)
        if model_keys_data_path:
            config['keys_data_path'] = as_path(model_keys_data_path, 'model_keys_data_path', preexists=True)
        if model_version_file_path:
            config['model'] = cls.get_model_info(model_version_file_path)
        if lookup_module_path:
            config['lookup_module_path'] = lookup_module_path
        return config_dir, config
    @classmethod
    def create(
        cls,
        model_keys_data_path=None,
        model_version_file_path=None,
        lookup_module_path=None,
        lookup_config=None,
        lookup_config_json=None,
        lookup_config_fp=None,
        complex_lookup_config_fp=None,
        user_data_dir=None,
        output_directory=None,
    ):
        """
        Creates a keys lookup class instance for the given model and supplier -
        local file paths are required for the model keys data folder, the model
        version file and the Git repository for the model keys server. Returns a
        pair ``(model_info, klc)``, where ``model_info`` is a dictionary holding
        model information from the model version file and `klc` is the lookup
        service class instance for the model.
        """
        # Config source precedence: dict > JSON string > file path.
        if lookup_config:
            config_dir = '.'
            config = lookup_config
        elif lookup_config_json:
            config_dir = '.'
            config = json.loads(lookup_config_json)
        elif lookup_config_fp:
            config_dir, config = cls.get_config(lookup_config_fp)
        else: # no config
            config_dir, config = '.', {}
        # Deprecated arguments are only honored when no config was supplied.
        if not config:
            config_dir, config = cls.update_deprecated_args(config_dir, config,
                                                            complex_lookup_config_fp, model_keys_data_path,
                                                            model_version_file_path, lookup_module_path)
        else: # reproduce lookup_config overwrite complex_lookup_config_fp
            complex_lookup_config_fp = None
        # Allow a custom KeyServer implementation to be plugged in via config.
        if config.get('key_server_module_path'):
            _KeyServer = get_custom_module(config.get('key_server_module_path'), 'key_server_module_path')
        else:
            _KeyServer = BasicKeyServer
        if _KeyServer.interface_version == '1':
            key_server = _KeyServer(config,
                                    config_dir=config_dir,
                                    user_data_dir=user_data_dir,
                                    output_dir=output_directory)
        else:
            raise OasisException(f"KeyServer interface version {_KeyServer.interface_version} not implemented")
        if complex_lookup_config_fp:
            key_server.complex_lookup_config_fp = complex_lookup_config_fp
        return config['model'], key_server
class BasicKeyServer:
    """
    A basic implementation of the KeyServerInterface.

    Loads the KeyLookup class from config['lookup_module_path'] if present,
    otherwise uses the built-in KeyLookup.  The KeyLookup must implement the
    KeyLookupInterface.

    Provides a multiprocess solution if the KeyLookup implements the
    process_locations_multiproc method.  Both the single- and multiprocess
    solutions use a low amount of memory, as they process the keys in
    chunks of limited size.

    This class implements all the file-writing methods that were previously
    handled by the lookup factory.
    """
interface_version = "1"
valid_format = ['oasis', 'json']
error_heading_row = OrderedDict([
('loc_id', 'LocID'),
('peril_id', 'PerilID'),
('coverage_type', 'CoverageTypeID'),
('status', 'Status'),
('message', 'Message'),
])
model_data_heading_row = OrderedDict([
('loc_id', 'LocID'),
('peril_id', 'PerilID'),
('coverage_type', 'CoverageTypeID'),
('model_data', 'ModelData'),
])
key_success_heading_row = OrderedDict([
('loc_id', 'LocID'),
('peril_id', 'PerilID'),
('coverage_type', 'CoverageTypeID'),
('area_peril_id', 'AreaPerilID'),
('vulnerability_id', 'VulnerabilityID'),
])
key_success_with_message_heading_row = OrderedDict([
('loc_id', 'LocID'),
('peril_id', 'PerilID'),
('coverage_type', 'CoverageTypeID'),
('area_peril_id', 'AreaPerilID'),
('vulnerability_id', 'VulnerabilityID'),
('message', 'Message')
])
min_bloc_size = 1000
max_bloc_size = 10000
def __init__(self, config, config_dir=None, user_data_dir=None, output_dir=None):
self.config = config
self.config_dir = config_dir or '.'
self.user_data_dir = user_data_dir
self.output_dir = output_dir
self.lookup_cls = self.get_lookup_cls()
    def get_lookup_cls(self):
        """Resolve the KeyLookup class from config, trying in order:
        'lookup_class' (dotted path), 'lookup_module' (dotted module path),
        'lookup_module_path' (file path), then the built-in lookups."""
        if self.config.get('lookup_class'):
            lookup_cls = import_from_string(self.config.get('lookup_class'))
        elif self.config.get('lookup_module'):
            lookup_module = import_from_string(self.config.get('lookup_module'))
            # By convention the class is named "<model_id>KeysLookup".
            lookup_cls = getattr(lookup_module, '{}KeysLookup'.format(self.config['model']['model_id']))
        elif self.config.get('lookup_module_path'):
            lookup_module_path = self.config.get('lookup_module_path')
            # Relative paths are resolved against the config directory.
            if not os.path.isabs(lookup_module_path):
                lookup_module_path = os.path.join(self.config_dir, lookup_module_path)
            lookup_module = get_custom_module(lookup_module_path, 'lookup_module_path')
            lookup_cls = getattr(lookup_module, '{}KeysLookup'.format(self.config['model']['model_id']))
        else: # built-in lookup
            if self.config.get('builtin_lookup_type') == 'deterministic':
                lookup_cls = DeterministicLookup
            elif self.config.get('builtin_lookup_type') == 'new_lookup':
                lookup_cls = NewLookup
            else:
                raise OasisException(f"Unrecognised lookup config file, or config file is from deprecated built in lookup module 'oasislmf<=1.16.0' ")
        return lookup_cls
@staticmethod
def create_lookup(lookup_cls, config, config_dir, user_data_dir, output_dir, lookup_id):
lookup_config = copy.deepcopy(config)
lookup_config['lookup_id'] = lookup_id
lookup_interface_version = getattr(lookup_cls, 'interface_version', '0')
if lookup_interface_version == '1':
return lookup_cls(config,
config_dir=config_dir,
user_data_dir=user_data_dir,
output_dir=output_dir)
elif lookup_interface_version == '0':
warnings.warn('OasisLookupInterface (or OasisBaseKeysLookup) is now deprecated'
' Interface for lookup is now lookup.interface.LookupInterface'
' for similar functionality use lookup.base.AbstractBasicKeyLookup'
' for multiprocess implementation add lookup.base.MultiprocLookupMixin')
if not (config and output_dir):
return lookup_cls(
keys_data_directory=config.get('keys_data_path'),
supplier=config['model']['supplier_id'],
model_name=config['model']['model_id'],
model_version=config['model']['model_version'],
)
elif not user_data_dir:
return lookup_cls(
keys_data_directory=config.get('keys_data_path'),
supplier=config['model']['supplier_id'],
model_name=config['model']['model_id'],
model_version=config['model']['model_version'],
complex_lookup_config_fp=config_dir,
output_directory=output_dir
)
else:
return lookup_cls(
keys_data_directory=config.get('keys_data_path'),
supplier=config['model']['supplier_id'],
model_name=config['model']['model_id'],
model_version=config['model']['model_version'],
complex_lookup_config_fp=config_dir,
user_data_dir=user_data_dir,
output_directory=output_dir
)
else:
raise OasisException(f"lookup interface version {lookup_interface_version} not implemented")
    def get_locations(self, location_fp):
        """load exposure data from location_fp and return the exposure dataframe"""
        return get_location_df(location_fp)
    @staticmethod
    @with_error_queue
    def location_producer(error_queue, loc_df, part_count, loc_queue):
        """Split `loc_df` into `part_count` chunks (grouped by loc_id so a
        location never straddles chunks) and feed them to `loc_queue`;
        a trailing None signals end-of-stream to the workers."""
        loc_ids_parts = np.array_split(np.unique(loc_df['loc_id']), part_count)
        loc_df_parts = (loc_df[loc_df['loc_id'].isin(loc_ids_parts[i])] for i in range(part_count))
        loc_df_part = True
        while loc_df_part is not None:
            loc_df_part = next(loc_df_parts, None)
            # Retry the put with a timeout, bailing out as soon as any
            # process has reported an error (while/else: the else runs when
            # the loop condition goes false, i.e. an error appeared).
            while error_queue.empty():
                try:
                    loc_queue.put(loc_df_part, timeout=5)
                    break
                except Full:
                    pass
            else:
                return
@staticmethod
@with_error_queue
def lookup_multiproc_worker(error_queue, lookup_cls, config, config_dir, user_data_dir, output_dir, lookup_id, loc_queue, key_queue):
    """Worker process: consume location chunks from loc_queue and push lookup
    results onto key_queue.

    On receiving the None sentinel it re-queues it (so sibling workers also
    stop) and puts its own None on key_queue so the consumer can count
    finished workers.  Returns early whenever error_queue is non-empty.
    """
    lookup = BasicKeyServer.create_lookup(lookup_cls, config, config_dir, user_data_dir, output_dir, lookup_id)
    while True:
        # fetch the next location chunk (retry until success, or abort on error)
        while error_queue.empty():
            try:
                loc_df_part = loc_queue.get(timeout=5)
                break
            except Empty:
                pass
        else:
            return
        if loc_df_part is None:
            loc_queue.put(None)  # pass the sentinel on to the other workers
            key_queue.put(None)  # tell the consumer this worker is done
            break
        # push the result chunk (retry until success, or abort on error)
        while error_queue.empty():
            try:
                key_queue.put(lookup.process_locations_multiproc(loc_df_part), timeout=5)
                break
            except Full:
                pass
        else:
            return
@staticmethod
def key_producer(key_queue, error_queue, worker_count):
    """Yield result chunks from key_queue until every worker has finished.

    Each worker pushes a final None when done; the generator stops after
    *worker_count* sentinels have been seen, or early if error_queue becomes
    non-empty.
    """
    finished_workers = 0
    while finished_workers < worker_count and error_queue.empty():
        # retry the get until it succeeds, or abort when an error is reported
        while error_queue.empty():
            try:
                res = key_queue.get(timeout=5)
                break
            except Empty:
                pass
        else:
            # error elsewhere: stop without touching 'res' (which may be unset)
            break
        if res is None:
            finished_workers+=1
        else:
            yield res
def get_success_heading_row(self, keys, keys_success_msg):
    """Pick the CSV heading mapping to use for successful lookup rows.

    'model_data' columns take precedence; otherwise the variant with or
    without the message column is chosen based on *keys_success_msg*.
    """
    if 'model_data' in keys:
        heading = self.model_data_heading_row
    elif keys_success_msg:
        heading = self.key_success_with_message_heading_row
    else:
        heading = self.key_success_heading_row
    return heading
def write_json_keys_file(self, results, keys_success_msg, successes_fp, errors_fp):
    """Write lookup results to JSON success/error files and return their row counts."""
    # no streaming implementation for json format: concatenate all chunks first
    all_results = pd.concat((chunk for chunk in results if not chunk.empty))
    is_success = all_results['status'] == OASIS_KEYS_STATUS['success']['id']
    success_df = all_results[is_success]
    success_df.to_json(successes_fp, orient='records', indent=4, force_ascii=False)
    successes_count = success_df.shape[0]
    error_count = 0
    if errors_fp:
        errors_df = all_results[~is_success]
        errors_df.to_json(errors_fp, orient='records', indent=4, force_ascii=False)
        error_count = errors_df.shape[0]
    return successes_count, error_count
def write_oasis_keys_file(self, results, keys_success_msg, successes_fp, errors_fp):
    """Stream lookup result chunks to the Oasis CSV keys files.

    Successful rows go to *successes_fp*; failed rows go to *errors_fp*
    (only when given).  Returns (successes_count, error_count).
    """
    with ExitStack() as stack:
        successes_file = stack.enter_context(open(successes_fp, 'w', encoding='utf-8'))
        if errors_fp:
            errors_file = stack.enter_context(open(errors_fp, 'w', encoding='utf-8'))
            # error file header is written once up front
            errors_file.write(','.join(self.error_heading_row.values()) + '\n')
        else:
            errors_file = None
        success_heading_row = None
        successes_count = 0
        error_count = 0
        for i, result in enumerate(results):
            success = result['status'] == OASIS_KEYS_STATUS['success']['id']
            success_df = result[success]
            if success_heading_row is None:
                # heading depends on the columns present in the first chunk
                success_heading_row = self.get_success_heading_row(result.columns, keys_success_msg)
            # header=not i: write the CSV header only for the first chunk
            success_df[success_heading_row.keys()].rename(columns=success_heading_row
                                                          ).to_csv(successes_file, index=False, header=not i)
            successes_count += success_df.shape[0]
            if errors_file:
                errors_df = result[~success]
                errors_df[self.error_heading_row.keys()].rename(columns=self.error_heading_row
                                                                ).to_csv(errors_file, index=False, header=False)
                error_count += errors_df.shape[0]
    return successes_count, error_count
def write_keys_file(self, results, successes_fp, errors_fp, output_format, keys_success_msg):
    """Dispatch to the writer for *output_format* and return the paths with counts."""
    if output_format not in self.valid_format:
        raise OasisException(f"Unrecognised lookup file output format {output_format} - valid formats are {self.valid_format}")
    writer = getattr(self, f'write_{output_format}_keys_file')
    successes_count, error_count = writer(results, keys_success_msg, successes_fp, errors_fp)
    if not errors_fp:
        return successes_fp, successes_count
    return successes_fp, successes_count, errors_fp, error_count
def generate_key_files_singleproc(self, loc_df, successes_fp, errors_fp, output_format, keys_success_msg, **kwargs):
    """Run the lookup in-process on *loc_df* and write the keys files.

    Returns whatever write_keys_file returns:
    (successes_fp, successes_count[, errors_fp, error_count]).
    """
    if getattr(self, 'complex_lookup_config_fp', None):  # backward compatibility 1.15 hack
        config_dir = getattr(self, 'complex_lookup_config_fp', None)
    else:
        config_dir = self.config_dir
    lookup = self.create_lookup(self.lookup_cls, self.config, config_dir, self.user_data_dir, self.output_dir,
                                lookup_id=None)
    key_results = lookup.process_locations(loc_df)

    def gen_results(results):
        # Normalise the different return types of process_locations into a
        # stream of DataFrame chunks for write_keys_file.
        if isinstance(results, pd.DataFrame):
            yield results
        elif isinstance(results, (list, tuple)):
            yield pd.DataFrame(results)
        elif isinstance(results, types.GeneratorType):
            # drain the generator in blocks of at most max_bloc_size records
            results_part = pd.DataFrame.from_records(results, nrows=self.max_bloc_size)
            while not results_part.empty:
                yield results_part
                results_part = pd.DataFrame.from_records(results, nrows=self.max_bloc_size)
        else:
            # bug fix: this message was missing its f-prefix, so "{type(results)}"
            # was emitted literally; the message was also truncated
            raise OasisException(
                f"Unrecognised type for results: {type(results)}. "
                "Expected DataFrame, list, tuple or generator")

    return self.write_keys_file(gen_results(key_results),
                                successes_fp=successes_fp,
                                errors_fp=errors_fp,
                                output_format=output_format,
                                keys_success_msg=keys_success_msg,)
def generate_key_files_multiproc(self, loc_df, successes_fp, errors_fp, output_format, keys_success_msg,
                                 num_cores, num_partitions, **kwargs):
    """Run the lookup across multiple processes and write the keys files.

    A producer process partitions *loc_df*, a pool of worker processes runs
    the lookup on each partition, and this process consumes the results and
    writes them out.  Falls back to the single-process implementation when
    only one worker would be used.
    """
    if getattr(self, 'complex_lookup_config_fp', None):  # backward compatibility 1.15 hack
        config_dir = getattr(self, 'complex_lookup_config_fp', None)
    else:
        config_dir = self.config_dir
    pool_count = num_cores if num_cores > 0 else cpu_count()
    if num_partitions > 0:
        part_count = num_partitions
    else:
        # target partition size clamped to [min_bloc_size, max_bloc_size] rows
        bloc_size = min(max(math.ceil(loc_df.shape[0] / pool_count), self.min_bloc_size), self.max_bloc_size)
        part_count = math.ceil(loc_df.shape[0] / bloc_size)
    pool_count = min(pool_count, part_count)
    if pool_count <= 1:
        # multiprocessing overhead is not worth it for a single partition
        return self.generate_key_files_singleproc(loc_df, successes_fp, errors_fp, output_format, keys_success_msg)
    loc_queue = Queue(maxsize=pool_count)
    key_queue = Queue(maxsize=pool_count)
    error_queue = Queue()  # any exc_info put here aborts the whole run
    location_producer = Process(target=self.location_producer, args=(error_queue, loc_df, part_count, loc_queue))
    workers = [Process(target=self.lookup_multiproc_worker,
                       args=(error_queue, self.lookup_cls, self.config, config_dir,
                             self.user_data_dir, self.output_dir,
                             lookup_id, loc_queue, key_queue))
               for lookup_id in range(pool_count)]
    location_producer.start()
    [worker.start() for worker in workers]
    try:
        return self.write_keys_file(self.key_producer(key_queue, error_queue, worker_count= pool_count),
                                    successes_fp=successes_fp,
                                    errors_fp=errors_fp,
                                    output_format=output_format,
                                    keys_success_msg=keys_success_msg,)
    except Exception:
        error_queue.put(sys.exc_info())
    finally:
        # terminate any still-running processes, then re-raise the first
        # recorded error with its original traceback
        for process in [location_producer] + workers:
            if process.is_alive():
                process.terminate()
            process.join()
        loc_queue.close()
        key_queue.close()
        if not error_queue.empty():
            exc_info = error_queue.get()
            raise exc_info[0].with_traceback(exc_info[1], exc_info[2])
@oasis_log()
def generate_key_files(
        self,
        location_fp,
        successes_fp,
        errors_fp=None,
        output_format='oasis',
        keys_success_msg=False,
        multiproc_enabled=True,
        multiproc_num_cores=-1,
        multiproc_num_partitions=-1,
        location_df=None,
        **kwargs
):
    """Generate the keys files for an exposure file.

    Steps:
    1. get_locations turns *location_fp* into a location DataFrame
       (skipped when *location_df* is supplied directly);
    2. the single- or multi-process lookup produces result chunks;
    3. write_keys_file writes the success/error files.
    """
    successes_fp = as_path(successes_fp, 'successes_fp', preexists=False)
    errors_fp = as_path(errors_fp, 'errors_fp', preexists=False)
    locations = location_df if location_df is not None else self.get_locations(location_fp)
    # the multiproc path is only available when the lookup class supports it
    use_multiproc = multiproc_enabled and hasattr(self.lookup_cls, 'process_locations_multiproc')
    if use_multiproc:
        return self.generate_key_files_multiproc(locations,
                                                 successes_fp=successes_fp,
                                                 errors_fp=errors_fp,
                                                 output_format=output_format,
                                                 keys_success_msg=keys_success_msg,
                                                 num_cores=multiproc_num_cores,
                                                 num_partitions=multiproc_num_partitions)
    return self.generate_key_files_singleproc(locations,
                                              successes_fp=successes_fp,
                                              errors_fp=errors_fp,
                                              output_format=output_format,
                                              keys_success_msg=keys_success_msg,
                                              )
|
tkGuiControl.py | import serial
import time
import threading
import sys
NMB_OF_SLIDERS = 5  # one slider per robot-arm servo channel
#PORT = "loop://logging=debug"
PORT = "/dev/ttyACM0"  # Arduino serial device; the loop:// URL above is for testing
TIMEOUT = 1  # serial read timeout in seconds
# Python 2/3 compatibility: import Tkinter under its versioned name and define
# data() so that outgoing strings become the bytes pyserial expects on Python 3.
if sys.version_info >= (3, 0):
    from tkinter import *

    def data(string):
        return bytes(string, 'latin1')
else:
    from Tkinter import *

    def data(string): return string
class App(Frame):
    """Tk frame with one slider per servo; slider moves are sent over serial,
    and position feedback from the Arduino moves the sliders back."""

    def __init__(self, master=None):
        Frame.__init__(self, master)
        master.protocol("WM_DELETE_WINDOW", self.stop)
        self.pack()
        self.createWidgets()
        self.serial = serial.serial_for_url(PORT, timeout=TIMEOUT)
        self.alive = True
        # daemon thread so a blocked serial read cannot prevent interpreter exit;
        # Thread(name=..., daemon=...) replaces the deprecated setName()/setDaemon()
        self.thread_read = threading.Thread(target=self.reader,
                                            name='read serial', daemon=True)
        self.thread_read.start()

    def reader(self):
        """Loop forever: read feedback lines and mirror them on the sliders."""
        while self.alive:
            try:
                data = self.serial.readline()  # read one line, blocking
                if data:
                    # bug fix: readline() returns bytes on Python 3, so it must
                    # be decoded before concatenating with str (was a TypeError)
                    print('received: <' + data.decode('latin1') + '>')
                    # parse the real positions reported by the arduino as feedback
                    values = data.decode().split(',')
                    assert (len(values) == NMB_OF_SLIDERS)
                    for i in range(NMB_OF_SLIDERS):
                        self.sliders[i].set(int(values[i]))
            except Exception:
                sys.stderr.write('ERROR: %s\n' % sys.exc_info()[0])
                raise
        self.alive = False

    def sendValues(self, event):
        """Send the current slider positions as 'v0, v1, ...' over serial."""
        status = ", ".join(str(slider.get()) for slider in self.sliders)
        self.serial.write(data(status + '\n'))
        self.STATUS.config(text="Sent: <" + status + ">")

    def addSlider(self):
        """Create one 0-180 degree slider wired to sendValues on release."""
        slider = Scale(from_=0, to=180, resolution=1)
        slider.set(90)  # start at the mid position
        slider.pack(side="left", expand=1, fill="both")
        slider.bind("<ButtonRelease-1>", self.sendValues)
        self.sliders.append(slider)

    def stop(self):
        """Stop the reader thread and quit the GUI (idempotent)."""
        if self.alive:
            self.alive = False
            self.thread_read.join()
            self.quit()

    def createWidgets(self):
        """Build the status label, the sliders and the quit button."""
        self.STATUS = Label()
        self.STATUS["text"] = "Status"
        self.STATUS.pack(side="bottom", fill="x", anchor="w")
        self.sliders = []
        for i in range(NMB_OF_SLIDERS):
            self.addSlider()
        self.QUIT = Button()
        self.QUIT["text"] = "Quit"
        self.QUIT["command"] = self.stop
        self.QUIT.pack(side="right", fill="y", anchor="e")
# Build the root window sized for a small touch display, attach the app and
# run the Tk event loop until the user quits.
root = Tk()
root.geometry("480x320")
app = App(master=root)
app.master.title("pyRobotArm")
app.mainloop()
root.destroy()
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import binascii
import time
import threading
import os
import traceback
import json
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional, TYPE_CHECKING, Sequence, List, Union
import eth_abi
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor, QFont
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal, QPoint
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QSizePolicy, QStatusBar, QToolTip, QSplitter, QDialog,
QMenu, QAction, QStackedWidget, QToolButton)
import electrum
from electrum.bitcoin import COIN, is_address, b58_address_to_hash160, Token, opcodes, \
TYPE_SCRIPT, is_hash160, hash_160, eth_abi_encode, Delegation, DELEGATE_ABI, DELEGATION_CONTRACT
from electrum import (keystore, ecc, constants, util, bitcoin, commands,
paymentrequest, lnutil)
from electrum.plugin import run_hook, BasePlugin
from electrum.i18n import _
from electrum.util import (format_time,
UserCancelled, profiler,
bfh, InvalidPassword,
UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI, maybe_extract_bolt11_invoice, NotEnoughFunds,
NoDynamicFeeEstimates, MultipleSpendMaxTxOutputs,
AddTransactionException)
from electrum.invoices import PR_TYPE_ONCHAIN, PR_TYPE_LN, PR_DEFAULT_EXPIRATION_WHEN_CREATING, Invoice
from electrum.invoices import PR_PAID, PR_FAILED, pr_expiration_values, LNInvoice, OnchainInvoice
from electrum.transaction import (Transaction, PartialTxInput,
PartialTransaction, PartialTxOutput)
from electrum.transaction import contract_script, decode_opcreate_script, decode_opsender_script
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption,
CannotDoubleSpendTx)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed, UntrustedServerReturnedError
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from electrum.logging import Logger
from electrum.lnutil import ln_dummy_address, extract_nodeid, ConnStringFormatError
from electrum.lnaddr import lndecode, LnDecodeException
from electrum.plugins.trezor.trezor import TrezorKeyStore
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, FreezableLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider, FeeComboBox
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton,
import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen,
TRANSACTION_FILE_EXTENSION_FILTER_ANY, MONOSPACE_FONT, getOpenFileName, getSaveFileName)
from .util import ButtonsTextEdit, ButtonsLineEdit
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .channels_list import ChannelsList
from .confirm_tx_dialog import ConfirmTxDialog
from .transaction_dialog import PreviewTxDialog
from .token_dialog import TokenAddDialog, TokenInfoDialog, TokenSendDialog
from .smart_contract_dialog import ContractCreateDialog, ContractEditDialog, ContractFuncDialog
from .delegation_dialog import DelegationDialog
from electrum.coinchooser import SenderNoUTXOException
# Imported only for type annotations, to avoid a circular import at runtime.
if TYPE_CHECKING:
    from . import ElectrumGui

# Max route-finding attempts for a single lightning payment.
LN_NUM_PAYMENT_ATTEMPTS = 10
class StatusBarButton(QToolButton):
    """Small icon-only button used in the status bar; activates on click or Enter."""

    def __init__(self, icon, tooltip, func):
        QToolButton.__init__(self)
        self.func = func
        self.setText('')
        self.setIcon(icon)
        self.setIconSize(QSize(25, 25))
        self.setToolTip(tooltip)
        self.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
        self.setAutoRaise(True)
        self.setMaximumWidth(25)
        self.setCursor(QCursor(Qt.PointingHandCursor))
        self.clicked.connect(self.onPress)

    def onPress(self, checked=False):
        """Drop the unwanted PyQt5 'checked' argument and invoke the callback."""
        self.func()

    def keyPressEvent(self, e):
        """Treat Return/Enter as a click."""
        if e.key() in (Qt.Key_Return, Qt.Key_Enter):
            self.func()
def protected(func):
    """Decorator that asks for the wallet password before calling *func*.

    The password is forwarded as the 'password' keyword argument.  None means
    either an unencrypted wallet or that the user cancelled the prompt; an
    empty input is forwarded as the empty string.
    """
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                return  # user cancelled the password prompt
            try:
                self.wallet.check_password(password)
            except Exception as exc:
                self.show_error(str(exc), parent=parent)
            else:
                break
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main per-wallet window of the Qtum Electrum GUI."""

    # Qt signals used to marshal events from background threads onto the GUI thread.
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    #ln_payment_attempt_signal = pyqtSignal(str)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()
    show_error_signal = pyqtSignal(str)

    # Currently pending BIP70 payment request, if any.
    payment_request: Optional[paymentrequest.PaymentRequest]
def __init__(self, gui_object: 'ElectrumGui', wallet: Abstract_Wallet):
    """Build the main wallet window: tabs, status bar, menus, shortcuts and
    network callbacks for *wallet*."""
    QMainWindow.__init__(self)
    self.gui_object = gui_object
    self.config = config = gui_object.config  # type: SimpleConfig
    self.gui_thread = gui_object.gui_thread
    assert wallet, "no wallet"
    self.wallet = wallet
    if wallet.has_lightning():
        self.wallet.config.set_key('show_channels_tab', True)
    self.setup_exception_hook()
    self.network = gui_object.daemon.network  # type: Network
    self.fx = gui_object.daemon.fx  # type: FxThread
    self.contacts = wallet.contacts
    self.tray = gui_object.tray
    self.app = gui_object.app
    self._cleaned_up = False
    self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
    self.payto_URI = None
    self.checking_accounts = False
    self.qr_window = None
    self.pluginsdialog = None
    self.showing_cert_mismatch_error = False
    self.tl_windows = []  # stack of dialogs that should parent new dialogs
    self.pending_invoice = None
    Logger.__init__(self)

    self.tx_notification_queue = queue.Queue()
    self.tx_notification_last_time = 0

    self.create_status_bar()
    self.need_update = threading.Event()

    self.completions = QStringListModel()

    coincontrol_sb = self.create_coincontrol_statusbar()

    # Create all tabs; the first four are always shown, the rest are optional.
    self.tabs = tabs = QTabWidget(self)
    self.send_tab = self.create_send_tab()
    self.receive_tab = self.create_receive_tab()
    self.addresses_tab = self.create_addresses_tab()
    self.utxo_tab = self.create_utxo_tab()
    self.console_tab = self.create_console_tab()
    self.contacts_tab = self.create_contacts_tab()
    self.channels_tab = self.create_channels_tab()
    self.tokens_tab = self.create_tokens_tab()
    self.smart_contract_tab = self.create_smart_contract_tab()
    self.delegations_tab = self.create_delegations_tab()
    tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
    tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
    tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))
    tabs.addTab(self.tokens_tab, read_QIcon("tab_contacts.png"), _('Tokens'))

    def add_optional_tab(tabs, tab, icon, description, name):
        # Tag the tab with the metadata toggle_tab() needs to re-insert it.
        tab.tab_icon = icon
        tab.tab_description = description
        tab.tab_pos = len(tabs)
        tab.tab_name = name
        if self.config.get('show_{}_tab'.format(name), False):
            tabs.addTab(tab, icon, description.replace("&", ""))

    add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
    if self.wallet.has_lightning():
        add_optional_tab(tabs, self.channels_tab, read_QIcon("lightning.png"), _("Channels"), "channels")
    add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
    add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
    add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")
    add_optional_tab(tabs, self.smart_contract_tab, read_QIcon("tab_console.png"), _('Smart Contract'), 'contract')
    add_optional_tab(tabs, self.delegations_tab, read_QIcon("tab_console.png"), _('Delegations'), 'delegations')

    tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)

    central_widget = QWidget()
    vbox = QVBoxLayout(central_widget)
    vbox.setContentsMargins(0, 0, 0, 0)
    vbox.addWidget(tabs)
    vbox.addWidget(coincontrol_sb)
    self.setCentralWidget(central_widget)

    if self.config.get("is_maximized"):
        self.showMaximized()

    self.setWindowIcon(read_QIcon("electrum.png"))
    self.init_menubar()

    # weak proxy so the shortcut lambdas don't keep the tab widget alive
    wrtabs = weakref.proxy(tabs)
    QShortcut(QKeySequence("Ctrl+W"), self, self.close)
    QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
    QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
    QShortcut(QKeySequence("F5"), self, self.update_wallet)
    QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
    QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
    for i in range(wrtabs.count()):
        QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

    self.payment_request_ok_signal.connect(self.payment_request_ok)
    self.payment_request_error_signal.connect(self.payment_request_error)
    self.show_error_signal.connect(self.show_error)
    self.history_list.setFocus(True)

    # network callbacks
    if self.network:
        self.network_signal.connect(self.on_network_qt)
        interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                     'new_transaction', 'status',
                     'banner', 'verified', 'fee', 'fee_histogram', 'on_quotes',
                     'on_history', 'channel', 'channels_updated',
                     'payment_failed', 'payment_succeeded', 'on_token', 'on_delegation',
                     'invoice_status', 'request_status', 'ln_gossip_sync_progress',
                     'cert_mismatch', 'gossip_db_loaded']
        # To avoid leaking references to "self" that prevent the
        # window from being GC-ed when closed, callbacks should be
        # methods of this class only, and specifically not be
        # partials, lambdas or methods of subobjects. Hence...
        util.register_callback(self.on_network, interests)
        # set initial message
        self.console.showMessage(self.network.banner)
        # update fee slider in case we missed the callback
        #self.fee_slider.update()
    self.load_wallet(wallet)
    gui_object.timer.timeout.connect(self.timer_actions)
    self.fetch_alias()

    # If the option hasn't been set yet
    if config.get('check_updates') is None:
        choice = self.question(title="Qtum Electrum - " + _("Enable update check"),
                               msg=_("For security reasons we advise that you always use the latest version of Electrum.") + " " +
                                   _("Would you like to be notified when there is a newer version of Electrum available?"))
        config.set_key('check_updates', bool(choice), save=True)

    self._update_check_thread = None
    if config.get('check_updates', False):
        # The references to both the thread and the window need to be stored somewhere
        # to prevent GC from getting in our way.
        def on_version_received(v):
            if UpdateCheck.is_newer(v):
                self.update_check_button.setText(_("Update to Qtum Electrum {} is available").format(v))
                self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                self.update_check_button.show()
        self._update_check_thread = UpdateCheckThread()
        self._update_check_thread.checked.connect(on_version_received)
        self._update_check_thread.start()
def setup_exception_hook(self):
    """Install the crash-reporter exception hook for this wallet window."""
    Exception_Hook.maybe_setup(config=self.config,
                               wallet=self.wallet)
def run_coroutine_from_thread(self, coro, on_result=None):
    """Run *coro* on the network asyncio loop from the wallet's task thread.

    On success *on_result* (if given) is called with the coroutine's result;
    any failure is logged and surfaced to the user via show_error_signal.
    """
    def task():
        try:
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            result = fut.result()
            if on_result:
                on_result(result)
        except Exception as exc:
            self.logger.exception("exception in coro scheduled via window.wallet")
            self.show_error_signal.emit(str(exc))
    self.wallet.thread.add(task)
def on_fx_history(self):
    """Fiat exchange-rate history changed: redraw history and address views."""
    self.history_model.refresh('fx_history')
    self.address_list.update()
def on_fx_token(self):
    """Token fiat data changed: refresh token history and balances."""
    self.token_hist_model.refresh('fx_token')
    self.token_hist_list.update()
    self.token_balance_list.update()
def on_fx_delegation(self):
    """Delegation fiat data changed: refresh the delegations view."""
    self.delegation_list.update()
def on_fx_quotes(self):
    """New fiat quotes arrived: refresh status bar, amount edits and history."""
    self.update_status()
    # Refresh edits with the new rate
    edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
    edit.textEdited.emit(edit.text())
    edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
    edit.textEdited.emit(edit.text())
    # History tab needs updating if it used spot
    if self.fx.history_used_spot:
        self.history_model.refresh('fx_quotes')
    self.address_list.update()
def toggle_tab(self, tab):
    """Show or hide an optional tab, persist the choice and update its menu text."""
    show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
    self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
    item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
    tab.menu_action.setText(item_text)
    if show:
        # Find out where to place the tab
        index = len(self.tabs)
        for i in range(len(self.tabs)):
            try:
                # insert before the first optional tab with a higher nominal position
                if tab.tab_pos < self.tabs.widget(i).tab_pos:
                    index = i
                    break
            except AttributeError:
                pass  # fixed (always-shown) tabs have no tab_pos attribute
        self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
    else:
        i = self.tabs.indexOf(tab)
        self.tabs.removeTab(i)
def push_top_level_window(self, window):
    '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
    parented. This used to be done by explicitly providing the parent
    window, but that isn't something hardware wallet prompts know.'''
    # LIFO stack: the most recently pushed window parents new dialogs.
    self.tl_windows.append(window)
def pop_top_level_window(self, window):
    """Remove *window* from the top-level dialog stack (see push_top_level_window)."""
    self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
    """Return the window that should parent new dialogs.

    The top of the tl_windows stack overrides the default, unless the
    optional *test_func* rejects it.
    """
    candidate = self.tl_windows[-1] if self.tl_windows else None
    if candidate and test_func and not test_func(candidate):
        candidate = None  # only override if acceptable to test_func
    return self.top_level_window_recurse(candidate, test_func)
def diagnostic_name(self):
    """Log-prefix name for this window: delegate to the wallet's name."""
    #return '{}:{}'.format(self.__class__.__name__, self.wallet.diagnostic_name())
    return self.wallet.diagnostic_name()
def is_hidden(self):
    """Return True when the window is minimized or not shown at all."""
    if self.isMinimized():
        return True
    return self.isHidden()
def show_or_hide(self):
    """Toggle visibility: raise the window if hidden, otherwise hide it."""
    action = self.bring_to_top if self.is_hidden() else self.hide
    action()
def bring_to_top(self):
    """Unhide the window and raise it above other windows."""
    self.show()
    self.raise_()
def on_error(self, exc_info):
    """Report an exception from a background task to the user.

    *exc_info* is a sys.exc_info() triple.  User cancellations are silent,
    user-facing errors are shown verbatim, everything else is logged and
    shown as repr.
    """
    e = exc_info[1]
    if isinstance(e, UserCancelled):
        pass  # deliberate abort by the user: nothing to report
    elif isinstance(e, UserFacingException):
        self.show_error(str(e))
    else:
        # TODO would be nice if we just sent these to the crash reporter...
        # anything we don't want to send there, we should explicitly catch
        # send_exception_to_crash_reporter(e)
        try:
            self.logger.error("on_error", exc_info=exc_info)
        except OSError:
            pass  # see #4418
        self.show_error(repr(e))
def on_network(self, event, *args):
    """Network-thread callback: forward the event to the GUI thread via a Qt signal."""
    # Handle in GUI thread
    self.network_signal.emit(event, args)
def on_network_qt(self, event, args=None):
    """Dispatch a network event on the GUI thread (connected to network_signal).

    Note: all windows get events from all wallets, so wallet-scoped events
    are filtered against self.wallet before acting on them.
    """
    if event == 'wallet_updated':
        wallet = args[0]
        if wallet == self.wallet:
            self.need_update.set()
    elif event == 'network_updated':
        self.gui_object.network_updated_signal_obj.network_updated_signal \
            .emit(event, args)
        self.network_signal.emit('status', None)
    elif event == 'blockchain_updated':
        # to update number of confirmations in history
        self.need_update.set()
    elif event == 'new_transaction':
        wallet, tx = args
        if wallet == self.wallet:
            # queued rather than shown directly; the GUI timer drains the queue
            self.tx_notification_queue.put(tx)
    elif event == 'on_quotes':
        self.on_fx_quotes()
    elif event == 'on_history':
        self.on_fx_history()
    elif event == 'on_token':
        self.on_fx_token()
    elif event == 'on_delegation':
        self.on_fx_delegation()
    elif event == 'gossip_db_loaded':
        self.channels_list.gossip_db_loaded.emit(*args)
    elif event == 'channels_updated':
        wallet = args[0]
        if wallet == self.wallet:
            self.channels_list.update_rows.emit(*args)
    elif event == 'channel':
        wallet = args[0]
        if wallet == self.wallet:
            self.channels_list.update_single_row.emit(*args)
            self.update_status()
    elif event == 'request_status':
        self.on_request_status(*args)
    elif event == 'invoice_status':
        self.on_invoice_status(*args)
    elif event == 'payment_succeeded':
        wallet = args[0]
        if wallet == self.wallet:
            self.on_payment_succeeded(*args)
    elif event == 'payment_failed':
        wallet = args[0]
        if wallet == self.wallet:
            self.on_payment_failed(*args)
    elif event == 'status':
        self.update_status()
    elif event == 'banner':
        self.console.showMessage(args[0])
    elif event == 'verified':
        wallet, tx_hash, tx_mined_status = args
        if wallet == self.wallet:
            self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
    elif event == 'fee':
        pass  # nothing to refresh here; fee edits poll the config directly
    elif event == 'fee_histogram':
        self.history_model.on_fee_histogram()
    elif event == 'ln_gossip_sync_progress':
        self.update_lightning_icon()
    elif event == 'cert_mismatch':
        self.show_cert_mismatch_error()
    else:
        self.logger.info(f"unexpected network event: {event} {args}")
def fetch_alias(self):
    """Resolve the configured OpenAlias in a background thread.

    Sets self.alias_info (None until resolved) and emits
    alias_received_signal once the lookup finishes.
    """
    self.alias_info = None
    alias = self.config.get('alias')
    if not alias:
        return
    alias = str(alias)

    def resolve():
        self.alias_info = self.contacts.resolve_openalias(alias)
        self.alias_received_signal.emit()

    # daemon=True (constructor kwarg replaces the deprecated setDaemon()):
    # a slow DNS lookup must not block interpreter shutdown
    threading.Thread(target=resolve, daemon=True).start()
def close_wallet(self):
    """Log the wallet path and notify plugins that this wallet is closing."""
    if self.wallet:
        self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet: Abstract_Wallet):
    """Attach *wallet* to this window and refresh every wallet-dependent view."""
    wallet.thread = TaskThread(self, self.on_error)
    self.update_recently_visited(wallet.storage.path)
    if wallet.has_lightning():
        util.trigger_callback('channels_updated', wallet)
    self.need_update.set()
    # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
    # update menus
    self.seed_menu.setEnabled(self.wallet.has_seed())
    self.update_lock_icon()
    self.update_buttons_on_seed()
    self.update_console()
    self.clear_receive_tab()
    self.request_list.update()
    self.channels_list.update()
    self.tabs.show()
    self.init_geometry()
    if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
        self.hide()
    else:
        self.show()
    self.watching_only_changed()
    run_hook('load_wallet', wallet, self)
    try:
        wallet.try_detecting_internal_addresses_corruption()
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        send_exception_to_crash_reporter(e)
def init_geometry(self):
    """Restore the window geometry saved in the wallet DB, or fall back to a default."""
    winpos = self.wallet.db.get("winpos-qt")
    try:
        screen = self.app.desktop().screenGeometry()
        assert screen.contains(QRect(*winpos))
        self.setGeometry(*winpos)
    except Exception:
        # narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; any bad/missing saved geometry lands here
        self.logger.info("using default geometry")
        self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
    """Refresh the window title and key-management menus after the wallet's
    watching-only state may have changed."""
    name = "Qtum Electrum Testnet" if constants.net.TESTNET else "Qtum Electrum"
    extra = [self.wallet.db.get('wallet_type', '?')]
    if self.wallet.is_watching_only():
        extra.append(_('watching only'))
    title = '%s %s - %s [%s]' % (name, ELECTRUM_VERSION,
                                 self.wallet.basename(), ', '.join(extra))
    self.setWindowTitle(title)
    self.password_menu.setEnabled(self.wallet.may_have_password())
    self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
    self.import_address_menu.setVisible(self.wallet.can_import_address())
    self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
    """Pop a warning dialog when the wallet cannot spend (watching-only)."""
    if not self.wallet.is_watching_only():
        return
    parts = [
        _("This wallet is watching-only."),
        _("This means you will not be able to spend QTUMs with it."),
        _("Make sure you own the seed phrase or the private keys, before you request QTUMs to be sent to this wallet.")
    ]
    self.show_warning(' '.join(parts), title=_('Watch-only wallet'))
def warn_if_testnet(self):
    """Show a one-time 'testnet coins are worthless' warning dialog."""
    if not constants.net.TESTNET:
        return
    # user might have opted out already
    if self.config.get('dont_show_testnet_warning', False):
        return
    # only show once per process lifecycle
    if getattr(self.gui_object, '_warned_testnet', False):
        return
    self.gui_object._warned_testnet = True
    msg = ''.join([
        _("You are in testnet mode."), ' ',
        _("Testnet coins are worthless."), '\n',
        _("Testnet is separate from the main Qtum network. It is used for testing.")
    ])
    cb = QCheckBox(_("Don't show this again."))
    cb_checked = False

    def on_cb(x):
        # remember the checkbox state; read after the modal dialog closes
        nonlocal cb_checked
        cb_checked = x == Qt.Checked

    cb.stateChanged.connect(on_cb)
    self.show_warning(msg, title=_('Testnet'), checkbox=cb)
    if cb_checked:
        self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
    """Prompt for a wallet file and open it in a new window."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as exc:
        self.show_error(str(exc))
        return
    filename, _selected_filter = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
    if filename:
        self.gui_object.new_window(filename)
def select_backup_dir(self, b):
    """Slot for the backup-directory button: ask the user for a directory
    and persist the choice (``b`` is the unused clicked() argument)."""
    current = self.config.get('backup_dir', '')
    chosen = QFileDialog.getExistingDirectory(self, "Select your wallet backup directory", current)
    if not chosen:
        return
    self.config.set_key('backup_dir', chosen)
    self.backup_dir_e.setText(chosen)
def backup_wallet(self):
    """Show a confirmation dialog, then copy the wallet file to the
    configured backup directory via ``wallet.save_backup()``."""
    d = WindowModalDialog(self, _("File Backup"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    backup_help = ""
    backup_dir = self.config.get('backup_dir')
    backup_dir_label = HelpLabel(_('Backup directory') + ':', backup_help)
    msg = _('Please select a backup directory')
    if self.wallet.has_lightning() and self.wallet.lnworker.channels:
        # lightning channels are not backed up live — warn the user
        msg += '\n\n' + ' '.join([
            _("Note that lightning channels will be converted to channel backups."),
            _("You cannot use channel backups to perform lightning payments."),
            _("Channel backups can only be used to request your channels to be closed.")
        ])
    self.backup_dir_e = QPushButton(backup_dir)
    self.backup_dir_e.clicked.connect(self.select_backup_dir)
    grid.addWidget(backup_dir_label, 1, 0)
    grid.addWidget(self.backup_dir_e, 1, 1)
    vbox.addLayout(grid)
    vbox.addWidget(WWLabel(msg))
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return  # user cancelled the dialog
    try:
        new_path = self.wallet.save_backup()
    except BaseException as reason:
        self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
        return
    if new_path:
        msg = _("A copy of your wallet file was created in")+" '%s'" % str(new_path)
        self.show_message(msg, title=_("Wallet backup created"))
    else:
        # NOTE(review): a falsy path presumably means no backup_dir configured
        # — confirm against wallet.save_backup()
        self.show_message(_("You need to configure a backup directory in your preferences"), title=_("Backup not created"))
def update_recently_visited(self, filename):
    """Record *filename* in the "recently open" list (most recent first,
    capped at 5, existing paths only) and rebuild the corresponding menu.

    Fixes: the sanity check on the config value used a bare ``except:``,
    which also swallows KeyboardInterrupt/SystemExit; narrowed to the
    exceptions ``sorted()`` can actually raise on a corrupt value.
    """
    recent = self.config.get('recently_open', [])
    try:
        # validate the config value: must be iterable with comparable items
        sorted(recent)
    except (TypeError, ValueError):
        recent = []
    if filename in recent:
        recent.remove(filename)
    recent.insert(0, filename)
    # drop entries whose files no longer exist, keep at most five
    recent = [path for path in recent if os.path.exists(path)]
    recent = recent[:5]
    self.config.set_key('recently_open', recent)
    self.recently_visited_menu.clear()
    for i, k in enumerate(sorted(recent)):
        b = os.path.basename(k)
        def loader(k):
            # factory binds k per iteration (avoids the late-binding pitfall)
            return lambda: self.gui_object.new_window(k)
        self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
    self.recently_visited_menu.setEnabled(bool(recent))
def get_wallet_folder(self):
    """Return the directory containing the current wallet file."""
    wallet_path = os.path.abspath(self.wallet.storage.path)
    return os.path.dirname(wallet_path)
def new_wallet(self):
    """Start the new/restore wizard with a fresh filename in the wallet folder."""
    try:
        wallet_folder = self.get_wallet_folder()
    except FileNotFoundError as e:
        self.show_error(str(e))
        return
    full_path = os.path.join(wallet_folder, get_new_wallet_name(wallet_folder))
    self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
    """Build the main menu bar: File, Wallet, View, Tools and Help menus.

    Several actions are stored as attributes (``password_menu``,
    ``import_privkey_menu``, ...) so ``watching_only_changed()`` can
    enable/hide them later.
    """
    menubar = QMenuBar()
    # --- File menu ---
    file_menu = menubar.addMenu(_("&File"))
    self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
    file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
    file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
    file_menu.addAction(_("&Save backup"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
    file_menu.addAction(_("Delete"), self.remove_wallet)
    file_menu.addSeparator()
    file_menu.addAction(_("&Quit"), self.close)
    # --- Wallet menu ---
    wallet_menu = menubar.addMenu(_("&Wallet"))
    wallet_menu.addAction(_("&Information"), self.show_wallet_info)
    wallet_menu.addSeparator()
    self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
    self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
    self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
    self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
    self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
    self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
    self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
    wallet_menu.addSeparator()
    addresses_menu = wallet_menu.addMenu(_("&Addresses"))
    addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
    labels_menu = wallet_menu.addMenu(_("&Labels"))
    labels_menu.addAction(_("&Import"), self.do_import_labels)
    labels_menu.addAction(_("&Export"), self.do_export_labels)
    history_menu = wallet_menu.addMenu(_("&History"))
    history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
    history_menu.addAction(_("&Summary"), self.history_list.show_summary)
    history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
    history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
    contacts_menu = wallet_menu.addMenu(_("Contacts"))
    contacts_menu.addAction(_("&New"), self.new_contact_dialog)
    contacts_menu.addAction(_("Import"), lambda: self.import_contacts())
    contacts_menu.addAction(_("Export"), lambda: self.export_contacts())
    invoices_menu = wallet_menu.addMenu(_("Invoices"))
    invoices_menu.addAction(_("Import"), lambda: self.import_invoices())
    invoices_menu.addAction(_("Export"), lambda: self.export_invoices())
    requests_menu = wallet_menu.addMenu(_("Requests"))
    requests_menu.addAction(_("Import"), lambda: self.import_requests())
    requests_menu.addAction(_("Export"), lambda: self.export_requests())
    # Token/smart-contract menus are only offered for signing-capable P2PKH
    # wallets that are not Trezor-backed (the first address decides the type).
    try:
        addr_type, __ = b58_address_to_hash160(self.addresses[0])
    except:
        # NOTE(review): bare except also hides IndexError for an empty address
        # list; the fallback value simply disables the token/contract menus
        addr_type = constants.net.SEGWIT_HRP
    if not isinstance(self.wallet.keystore, TrezorKeyStore) and addr_type == constants.net.ADDRTYPE_P2PKH and not self.wallet.is_watching_only():
        token_menu = wallet_menu.addMenu(_("&Token"))
        token_menu.addAction(_("Add Token"), lambda: self.token_add_dialog())
        smart_cotract_menu = wallet_menu.addMenu(_("&Smart Contract"))
        smart_cotract_menu.addAction(_("Add Contract"), lambda: self.contract_add_dialog())
        smart_cotract_menu.addAction(_("Create Contract"), lambda: self.contract_create_dialog())
    wallet_menu.addSeparator()
    wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
    # --- View menu ---
    def add_toggle_action(view_menu, tab):
        # menu text reflects the tab's current visibility
        is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
        item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
        tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
    view_menu = menubar.addMenu(_("&View"))
    add_toggle_action(view_menu, self.addresses_tab)
    add_toggle_action(view_menu, self.utxo_tab)
    if self.wallet.has_lightning():
        add_toggle_action(view_menu, self.channels_tab)
    add_toggle_action(view_menu, self.contacts_tab)
    add_toggle_action(view_menu, self.console_tab)
    add_toggle_action(view_menu, self.smart_contract_tab)
    add_toggle_action(view_menu, self.delegations_tab)
    # --- Tools menu ---
    tools_menu = menubar.addMenu(_("&Tools"))  # type: QMenu
    preferences_action = tools_menu.addAction(_("Preferences"), self.settings_dialog)  # type: QAction
    if sys.platform == 'darwin':
        # "Settings"/"Preferences" are all reserved keywords in macOS.
        # preferences_action will get picked up based on name (and put into a standardized location,
        # and given a standard reserved hotkey)
        # Hence, this menu item will be at a "uniform location re macOS processes"
        preferences_action.setMenuRole(QAction.PreferencesRole)  # make sure OS recognizes it as preferences
        # Add another preferences item, to also have a "uniform location for Electrum between different OSes"
        tools_menu.addAction(_("Electrum preferences"), self.settings_dialog)
    tools_menu.addAction(_("&Network"), self.gui_object.show_network_dialog).setEnabled(bool(self.network))
    tools_menu.addAction(_("&Lightning Network"), self.gui_object.show_lightning_dialog).setEnabled(bool(self.wallet.has_lightning() and self.network))
    tools_menu.addAction(_("Local &Watchtower"), self.gui_object.show_watchtower_dialog).setEnabled(bool(self.network and self.network.local_watchtower))
    tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
    tools_menu.addSeparator()
    tools_menu.addAction(_("&Sign POD"), self.sign_pod)
    tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
    tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
    tools_menu.addSeparator()
    paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
    raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
    raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
    raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
    raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
    raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
    self.raw_transaction_menu = raw_transaction_menu
    # let plugins extend the Tools menu
    run_hook('init_menubar_tools', self, tools_menu)
    # --- Help menu ---
    help_menu = menubar.addMenu(_("&Help"))
    help_menu.addAction(_("&About"), self.show_about)
    help_menu.addAction(_("&Check for updates"), self.show_update_check)
    help_menu.addAction(_("&Official website"), lambda: webopen("https://github.com/qtumproject/qtum-electrum/"))
    help_menu.addSeparator()
    help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
    help_menu.addAction(_("&Report Bug"), self.show_report_bug)
    help_menu.addSeparator()
    help_menu.addAction(_("&Donate to server"), self.donate_to_server)
    self.setMenuBar(menubar)
def donate_to_server(self):
    """Open the Send tab pre-filled with the current server's donation
    address, or show an error when the server does not publish one."""
    addr = self.network.get_donation_address()
    if not addr:
        self.show_error(_('No donation address for this server'))
        return
    host = self.network.get_parameters().server.host
    self.pay_to_URI('qtum:%s?message=donation for %s' % (addr, host))
def show_about(self):
    """Show the About dialog (version number and project blurb)."""
    QMessageBox.about(self, "Qtum Electrum",
                      (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                       _("This software is based on Electrum to support Qtum.") + " " +
                       _("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
                       _("You do not need to perform regular backups, because your wallet can be "
                         "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                       _("Startup times are instant because it operates in conjunction with high-performance "
                         "servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
                       _("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_update_check(self, version=None):
    """Open the update-check window, optionally seeded with a known
    latest version (kept alive as an attribute on the gui object)."""
    self.gui_object._update_check = UpdateCheck(latest_version=version)
def show_report_bug(self):
    """Show instructions for filing bug reports on GitHub (rich text)."""
    issue_link = f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>'''
    parts = [
        _("Please report any bugs as issues on github:<br/>"),
        issue_link,
        _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
        _("Try to explain not only what the bug is, but how it occurs."),
    ]
    self.show_message(' '.join(parts), title="Qtum Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
    """Drain the pending-transaction queue and emit desktop notifications.

    Skipped while syncing; rate-limited to one batch per 20 seconds.
    Three or more transactions are combined into a single summary message.
    """
    if self.tx_notification_queue.qsize() == 0:
        return
    if not self.wallet.up_to_date:
        return  # no notifications while syncing
    now = time.time()
    rate_limit = 20  # seconds
    if self.tx_notification_last_time + rate_limit > now:
        return
    self.tx_notification_last_time = now
    self.logger.info("Notifying GUI about new transactions")
    txns = []
    # drain the queue without blocking
    while True:
        try:
            txns.append(self.tx_notification_queue.get_nowait())
        except queue.Empty:
            break
    # Combine the transactions if there are at least three
    if len(txns) >= 3:
        total_amount = 0
        for tx in txns:
            tx_wallet_delta = self.wallet.get_wallet_delta(tx)
            if not tx_wallet_delta.is_relevant:
                continue
            total_amount += tx_wallet_delta.delta
        self.notify(_("{} new transactions: Total amount received in the new transactions {}")
                    .format(len(txns), self.format_amount_and_units(total_amount)))
    else:
        for tx in txns:
            tx_wallet_delta = self.wallet.get_wallet_delta(tx)
            if not tx_wallet_delta.is_relevant:
                continue
            self.notify(_("New transaction: {}").format(self.format_amount_and_units(tx_wallet_delta.delta)))
def notify(self, message):
    """Show *message* as a system-tray notification, falling back to the
    pre-Qt-5.9 overload when the QIcon variant is unavailable."""
    if not self.tray:
        return
    try:
        # this requires Qt 5.9
        self.tray.showMessage("Qtum Electrum", message, read_QIcon("electrum_dark_icon"), 20000)
    except TypeError:
        self.tray.showMessage("Qtum Electrum", message, QSystemTrayIcon.Information, 20000)
def timer_actions(self):
    """Periodic poll: refresh request statuses, apply pending wallet
    updates, resolve payto aliases, and flush transaction notifications."""
    self.request_list.refresh_status()
    # Note this runs in the GUI thread
    if self.need_update.is_set():
        self.need_update.clear()
        self.update_wallet()
    elif not self.wallet.up_to_date:
        # this updates "synchronizing" progress
        self.update_status()
    # resolve aliases
    # FIXME this is a blocking network call that has a timeout of 5 sec
    self.payto_e.resolve()
    self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False, num_zeros=None, decimal_point=None):
    """Format a satoshi amount *x* for display; delegates entirely to the
    user's config (which knows the chosen unit and formatting options)."""
    # x is in sats
    return self.config.format_amount(x, is_diff, whitespaces, num_zeros, decimal_point)
def format_amount_and_units(self, amount):
    """Return *amount* formatted in the base unit, with the fiat equivalent
    appended in parentheses when an exchange-rate source is available."""
    # amount is in sats
    base_text = self.config.format_amount_and_units(amount)
    fiat_text = self.fx.format_amount_and_units(amount) if self.fx else None
    if base_text and fiat_text:
        return base_text + ' (%s)' % fiat_text
    return base_text
def format_fee_rate(self, fee_rate):
    """Format a fee rate for display; delegated to the config."""
    return self.config.format_fee_rate(fee_rate)
def get_decimal_point(self):
    """Return the configured decimal point (unit precision)."""
    return self.config.get_decimal_point()
def base_unit(self):
    """Return the configured base unit name (e.g. the display currency unit)."""
    return self.config.get_base_unit()
def connect_fields(self, window, btc_e, fiat_e, fee_e):
    """Keep a BTC amount field and its fiat twin in sync.

    Each field gets a ``follows`` flag used as a re-entrancy guard so that
    programmatic updates from one edit do not trigger the other's handler.
    ``fee_e`` is optional; when given it is cleared / recomputed alongside.
    """
    def edit_changed(edit):
        if edit.follows:
            return  # change was programmatic (mirror update) — ignore
        edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
        fiat_e.is_last_edited = (edit == fiat_e)
        amount = edit.get_amount()
        rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
        if rate.is_nan() or amount is None:
            # no usable rate or amount: blank the mirror field(s)
            if edit is fiat_e:
                btc_e.setText("")
                if fee_e:
                    fee_e.setText("")
            else:
                fiat_e.setText("")
        else:
            if edit is fiat_e:
                # fiat edited: push converted sats into the BTC field
                btc_e.follows = True
                btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                btc_e.follows = False
                if fee_e:
                    window.update_fee()
            else:
                # BTC edited: push converted fiat into the fiat field
                fiat_e.follows = True
                fiat_e.setText(self.fx.ccy_amount_str(
                    amount * Decimal(rate) / COIN, False))
                fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                fiat_e.follows = False
    btc_e.follows = False
    fiat_e.follows = False
    fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
    btc_e.textChanged.connect(partial(edit_changed, btc_e))
    fiat_e.is_last_edited = False
def update_status(self):
    """Recompute the status-bar text and icon from network/wallet state.

    Branches: offline → connected (syncing / lagging / header sync /
    balance display) → connecting. Also mirrors the text into the tray
    tooltip.
    """
    if not self.wallet:
        return
    if self.network is None:
        text = _("Offline")
        icon = read_QIcon("status_disconnected.png")
    elif self.network.is_connected():
        server_height = self.network.get_server_height()
        server_lag = self.network.get_local_height() - server_height
        fork_str = "_fork" if len(self.network.get_blockchains()) > 1 else ""
        # Server height can be 0 after switching to a new server
        # until we get a headers subscription request response.
        # Display the synchronizing message in that case.
        if not self.wallet.up_to_date or server_height == 0:
            num_sent, num_answered = self.wallet.get_history_sync_state_details()
            text = ("{} ({}/{})"
                    .format(_("Synchronizing..."), num_answered, num_sent))
            icon = read_QIcon("status_waiting.png")
        elif server_lag > 1:
            # our local chain is behind the server
            text = _("Server is lagging ({} blocks)").format(server_lag)
            icon = read_QIcon("status_lagging%s.png"%fork_str)
        elif server_lag < (-1):
            # we are ahead of the server: it is still catching up on headers
            text = _("Synchronizing headers...")
            icon = read_QIcon("status_waiting.png")
        else:
            # fully synced: show confirmed/unconfirmed/unmatured balances
            c, u, x = self.wallet.get_balance()
            text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
            if u:
                text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
            if x:
                text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
            if self.wallet.has_lightning():
                l = self.wallet.lnworker.get_balance()
                text += u' \U000026a1 %s'%(self.format_amount_and_units(l).strip())
            # append fiat balance and price
            if self.fx.is_enabled():
                text += self.fx.get_fiat_status_text(c + u + x,
                    self.base_unit(), self.get_decimal_point()) or ''
            # connected icon only in this branch, so it does not override
            # the waiting/lagging icons chosen above
            if not self.network.proxy:
                icon = read_QIcon("status_connected%s.png"%fork_str)
            else:
                icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
    else:
        icon = read_QIcon("status_disconnected.png")
        if self.network.proxy:
            text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
        elif self.network.downloading_headers:
            text = _("Downloading headers...")
            icon = read_QIcon("status_waiting.png")
        else:
            text = _("Not connected")
    if self.tray:
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
    self.balance_label.setText(text)
    if self.status_button:
        self.status_button.setIcon(icon)
def update_wallet(self):
    """Refresh the status bar; refresh the tab views as well when the
    wallet is synced (or when there is no usable network connection)."""
    self.update_status()
    if self.wallet.up_to_date or not (self.network and self.network.is_connected()):
        self.update_tabs()
def update_tabs(self, wallet=None):
    """Refresh every list view/model in the window.

    *wallet* guards against stale signals meant for a different wallet
    instance; it defaults to this window's wallet.
    """
    if wallet is None:
        wallet = self.wallet
    if wallet != self.wallet:
        return  # signal was for another window's wallet
    self.history_model.refresh('update_tabs')
    self.request_list.update()
    self.address_list.update()
    self.utxo_list.update()
    self.contact_list.update()
    self.invoice_list.update()
    self.token_balance_list.update()
    self.token_hist_model.refresh('update_tabs')
    self.token_hist_list.update()
    self.smart_contract_list.update()
    self.delegation_list.update()
    # channels list updates via a signal so it runs in the GUI thread
    self.channels_list.update_rows.emit(wallet)
    self.update_completions()
def create_channels_tab(self):
    """Build the Lightning channels tab (list + toolbar)."""
    self.channels_list = ChannelsList(self)
    toolbar = self.channels_list.get_toolbar()
    return self.create_list_tab(self.channels_list, toolbar)
def create_history_tab(self):
    """Build the History tab: model, list view and its optional toolbar."""
    self.history_model = HistoryModel(self)
    self.history_list = l = HistoryList(self, self.history_model)
    self.history_model.set_view(self.history_list)
    l.searchable_list = l
    toolbar = l.create_toolbar(self.config)
    tab = self.create_list_tab(l, toolbar)
    # restore the user's last toolbar visibility choice
    toolbar_shown = bool(self.config.get('show_toolbar_history', False))
    l.show_toolbar(toolbar_shown)
    return tab
def show_address(self, addr):
    """Open the (modal) details dialog for *addr*."""
    from . import address_dialog
    dialog = address_dialog.AddressDialog(self, addr)
    dialog.exec_()
def show_channel(self, channel_id):
    """Open the (non-modal) details dialog for a Lightning channel."""
    from . import channel_details
    dialog = channel_details.ChannelDetailsDialog(self, channel_id)
    dialog.show()
def show_transaction(self, tx, *, tx_desc=None):
    """Open the transaction dialog for *tx*.

    tx_desc is set only for txs created in the Send tab. Note this method
    delegates to the module-level ``show_transaction`` function it shadows.
    """
    show_transaction(tx, parent=self, desc=tx_desc)
def show_lightning_transaction(self, tx_item):
    """Open the (non-modal) dialog for a lightning history item."""
    from .lightning_tx_dialog import LightningTxDialog
    dialog = LightningTxDialog(self, tx_item)
    dialog.show()
def create_receive_tab(self):
    """Build the Receive tab: request form (description / amount / expiry),
    the address/request/QR display tabs, and the incoming-payments list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.receive_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    self.receive_message_e = QLineEdit()
    grid.addWidget(QLabel(_('Description')), 0, 0)
    grid.addWidget(self.receive_message_e, 0, 1, 1, 4)
    self.receive_message_e.textChanged.connect(self.update_receive_qr)
    self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
    grid.addWidget(QLabel(_('Requested amount')), 1, 0)
    grid.addWidget(self.receive_amount_e, 1, 1)
    self.receive_amount_e.textChanged.connect(self.update_receive_qr)
    self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_receive_e.setVisible(False)
    grid.addWidget(self.fiat_receive_e, 1, 2, Qt.AlignLeft)
    self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
    # NOTE(review): this also wires the *send* tab's amount fields; assumes
    # self.amount_e / self.fiat_send_e already exist — confirm creation order
    self.connect_fields(self, self.amount_e, self.fiat_send_e, None)
    self.expires_combo = QComboBox()
    evl = sorted(pr_expiration_values.items())
    evl_keys = [i[0] for i in evl]
    evl_values = [i[1] for i in evl]
    default_expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    try:
        i = evl_keys.index(default_expiry)
    except ValueError:
        i = 0  # configured expiry not among the presets: fall back to first
    self.expires_combo.addItems(evl_values)
    self.expires_combo.setCurrentIndex(i)
    self.expires_combo.setFixedWidth(self.receive_amount_e.width())
    def on_expiry(i):
        # persist the chosen expiry for future requests
        self.config.set_key('request_expiry', evl_keys[i])
    self.expires_combo.currentIndexChanged.connect(on_expiry)
    msg = ' '.join([
        _('Expiration date of your request.'),
        _('This information is seen by the recipient if you send them a signed payment request.'),
        _('Expired requests have to be deleted manually from your list, in order to free the corresponding Qtum addresses.'),
        _('The qtum address never expires and will always be part of this electrum wallet.'),
    ])
    grid.addWidget(HelpLabel(_('Expires after'), msg), 2, 0)
    grid.addWidget(self.expires_combo, 2, 1)
    self.expires_label = QLineEdit('')
    self.expires_label.setReadOnly(1)
    self.expires_label.setFocusPolicy(Qt.NoFocus)
    self.expires_label.hide()
    grid.addWidget(self.expires_label, 2, 1)
    self.clear_invoice_button = QPushButton(_('Clear'))
    self.clear_invoice_button.clicked.connect(self.clear_receive_tab)
    self.create_invoice_button = QPushButton(_('New Address'))
    self.create_invoice_button.setIcon(read_QIcon("qtum.png"))
    self.create_invoice_button.setToolTip('Create on-chain request')
    self.create_invoice_button.clicked.connect(lambda: self.create_invoice(False))
    self.receive_buttons = buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_invoice_button)
    buttons.addWidget(self.create_invoice_button)
    if self.wallet.has_lightning():
        # add a second button for creating lightning requests
        self.create_invoice_button.setText(_('New Address'))
        self.create_lightning_invoice_button = QPushButton(_('Lightning'))
        self.create_lightning_invoice_button.setToolTip('Create lightning request')
        self.create_lightning_invoice_button.setIcon(read_QIcon("lightning.png"))
        self.create_lightning_invoice_button.clicked.connect(lambda: self.create_invoice(True))
        buttons.addWidget(self.create_lightning_invoice_button)
    grid.addLayout(buttons, 4, 3, 1, 2)
    # read-only text widget showing the serialized payment request
    self.receive_payreq_e = ButtonsTextEdit()
    self.receive_payreq_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_payreq_e.addCopyButton(self.app)
    self.receive_payreq_e.setReadOnly(True)
    self.receive_payreq_e.textChanged.connect(self.update_receive_qr)
    self.receive_payreq_e.setFocusPolicy(Qt.ClickFocus)
    # QR widget: click toggles the detached QR window
    self.receive_qr = QRCodeWidget(fixedSize=220)
    self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
    self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
    self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
    # read-only text widget showing the receiving address
    self.receive_address_e = ButtonsTextEdit()
    self.receive_address_e.setFont(QFont(MONOSPACE_FONT))
    self.receive_address_e.addCopyButton(self.app)
    self.receive_address_e.setReadOnly(True)
    self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
    qr_show = lambda: self.show_qrcode(str(self.receive_address_e.text()), _('Receiving address'), parent=self)
    qr_icon = "qrcode_white.png" if ColorScheme.dark_scheme else "qrcode.png"
    self.receive_address_e.addButton(qr_icon, qr_show, _("Show as QR code"))
    self.receive_requests_label = QLabel(_('Incoming payments'))
    from .request_list import RequestList
    self.request_list = RequestList(self)
    receive_tabs = QTabWidget()
    receive_tabs.addTab(self.receive_address_e, _('Address'))
    receive_tabs.addTab(self.receive_payreq_e, _('Request'))
    receive_tabs.addTab(self.receive_qr, _('QR Code'))
    receive_tabs.setCurrentIndex(self.config.get('receive_tabs_index', 0))
    receive_tabs.currentChanged.connect(lambda i: self.config.set_key('receive_tabs_index', i))
    receive_tabs_sp = receive_tabs.sizePolicy()
    receive_tabs_sp.setRetainSizeWhenHidden(True)
    receive_tabs.setSizePolicy(receive_tabs_sp)
    def maybe_hide_receive_tabs():
        # hide the address/request/QR widget until an actual request exists
        receive_tabs.setVisible(bool(self.receive_payreq_e.text()))
    self.receive_payreq_e.textChanged.connect(maybe_hide_receive_tabs)
    maybe_hide_receive_tabs()
    # layout
    vbox_g = QVBoxLayout()
    vbox_g.addLayout(grid)
    vbox_g.addStretch()
    hbox = QHBoxLayout()
    hbox.addLayout(vbox_g)
    hbox.addStretch()
    hbox.addWidget(receive_tabs)
    w = QWidget()
    w.searchable_list = self.request_list
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.receive_requests_label)
    vbox.addWidget(self.request_list)
    vbox.setStretchFactor(self.request_list, 1000)
    return w
def delete_requests(self, keys):
    """Delete the payment requests identified by *keys* and reset the
    Receive tab UI."""
    for request_key in keys:
        self.wallet.delete_request(request_key)
    self.request_list.update()
    self.clear_receive_tab()
def delete_lightning_payreq(self, payreq_key):
    """Delete a lightning invoice and refresh the affected list views."""
    self.wallet.lnworker.delete_invoice(payreq_key)
    self.request_list.update()
    self.invoice_list.update()
    self.clear_receive_tab()
def sign_payment_request(self, addr):
    """Sign the payment request for *addr* with the configured alias key.

    Only proceeds when an alias is configured, resolved (``alias_info``)
    and its address belongs to this wallet; silently no-ops otherwise.
    Prompts for the wallet password when the keystore is encrypted.
    """
    alias = self.config.get('alias')
    if alias and self.alias_info:
        alias_addr, alias_name, validated = self.alias_info
        if alias_addr:
            if self.wallet.is_mine(alias_addr):
                msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                password = None
                if self.wallet.has_keystore_encryption():
                    password = self.password_dialog(msg)
                    if not password:
                        return  # user cancelled password entry
                try:
                    self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                except Exception as e:
                    self.show_error(repr(e))
                    return
            else:
                # alias resolves to an address we do not own: cannot sign
                return
def create_invoice(self, is_lightning):
    """Create a new payment request (lightning or on-chain) from the
    Receive form, refresh the lists and copy the result to the clipboard."""
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    expiry = self.config.get('request_expiry', PR_DEFAULT_EXPIRATION_WHEN_CREATING)
    if is_lightning:
        if not self.wallet.lnworker.channels:
            self.show_error(_("You need to open a Lightning channel first."))
            return
        # TODO maybe show a warning if amount exceeds lnworker.num_sats_can_receive (as in kivy)
        key = self.wallet.lnworker.add_request(amount, message, expiry)
    else:
        key = self.create_bitcoin_request(amount, message, expiry)
        if not key:
            return  # user declined, or request creation failed
        self.address_list.update()
    assert key is not None
    self.request_list.update()
    self.request_list.select_key(key)
    # clear request fields
    self.receive_amount_e.setText('')
    self.receive_message_e.setText('')
    # copy to clipboard
    r = self.wallet.get_request(key)
    content = r.invoice if r.is_lightning() else r.get_address()
    title = _('Invoice') if is_lightning else _('Address')
    self.do_copy(content, title=title)
def create_bitcoin_request(self, amount, message, expiration) -> Optional[str]:
    """Create an on-chain payment request and return the receiving address,
    or None when the user declines one of the confirmation prompts.

    When no unused address is available, either reuses one (imported,
    non-deterministic wallets) or creates one beyond the gap limit
    (deterministic wallets) — both after explicit user confirmation.
    """
    addr = self.wallet.get_unused_address()
    if addr is None:
        if not self.wallet.is_deterministic():  # imported wallet
            msg = [
                _('No more addresses in your wallet.'), ' ',
                _('You are using a non-deterministic wallet, which cannot create new addresses.'), ' ',
                _('If you want to create new addresses, use a deterministic wallet instead.'), '\n\n',
                _('Creating a new payment request will reuse one of your addresses and overwrite an existing request. Continue anyway?'),
            ]
            if not self.question(''.join(msg)):
                return
            addr = self.wallet.get_receiving_address()
        else:  # deterministic wallet
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
    req = self.wallet.make_payment_request(addr, amount, message, expiration)
    try:
        self.wallet.add_payment_request(req)
    except Exception as e:
        self.logger.exception('Error adding payment request')
        self.show_error(_('Error adding payment request') + ':\n' + repr(e))
    else:
        # only sign once the request was stored successfully
        self.sign_payment_request(addr)
    return addr
def do_copy(self, content: str, *, title: str = None) -> None:
    """Copy *content* to the system clipboard and show a confirmation
    tooltip at the cursor (naming *title*, e.g. "Address", when given).

    Fixes: the no-title message has no "{}" placeholder, yet was passed
    through ``.format(title)`` with ``title`` being None — a harmless
    leftover, but one that would render the word "None" if a translated
    string ever gained a placeholder.
    """
    self.app.clipboard().setText(content)
    if title is None:
        tooltip_text = _("Text copied to clipboard")
    else:
        tooltip_text = _("{} copied to clipboard").format(title)
    QToolTip.showText(QCursor.pos(), tooltip_text, self)
def clear_receive_tab(self):
    """Reset the Receive form fields and deselect any request in the list."""
    self.receive_payreq_e.setText('')
    self.receive_address_e.setText('')
    self.receive_message_e.setText('')
    self.receive_amount_e.setAmount(None)
    # restore the expiry combo in place of the static expiry label
    self.expires_label.hide()
    self.expires_combo.show()
    self.request_list.clearSelection()
def toggle_qr_window(self):
    """Show/hide the detached QR window, creating it lazily on first use
    and remembering its geometry across toggles."""
    from . import qrwindow
    if not self.qr_window:
        # first use: create and show
        self.qr_window = qrwindow.QR_Window(self)
        self.qr_window.setVisible(True)
        self.qr_window_geometry = self.qr_window.geometry()
    else:
        if not self.qr_window.isVisible():
            # re-show at the previously saved position/size
            self.qr_window.setVisible(True)
            self.qr_window.setGeometry(self.qr_window_geometry)
        else:
            # hide, saving geometry for the next toggle
            self.qr_window_geometry = self.qr_window.geometry()
            self.qr_window.setVisible(False)
    self.update_receive_qr()
def show_send_tab(self):
    """Switch the main tab widget to the Send tab."""
    self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def update_receive_qr(self):
    """Re-render the receive QR code from the current payment request text,
    and mirror it into the detached QR window if that is open."""
    uri = str(self.receive_payreq_e.text())
    if maybe_extract_bolt11_invoice(uri):
        # encode lightning invoices as uppercase so QR encoding can use
        # alphanumeric mode; resulting in smaller QR codes
        uri = uri.upper()
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
    """Highlight the receive address in red (with a tooltip) when it has
    been used before; otherwise restore the default styling."""
    addr = str(self.receive_address_e.text())
    reused = is_address(addr) and self.wallet.is_used(addr)
    if reused:
        self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
        self.receive_address_e.setToolTip(_("This address has already been used. "
                                            "For better privacy, do not reuse it for new payments."))
    else:
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
def create_send_tab(self):
    """Build the Send tab: pay-to / description / amount form, the action
    buttons, and the outgoing-payments (invoice) list."""
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    self.payto_e.addPasteButton(self.app)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Qtum address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Qtum address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # contact completion for the pay-to field
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = FreezableLineEdit()
    self.message_e.setMinimumWidth(700)
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 3, 0)
    grid.addWidget(self.amount_e, 3, 1)
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 3, 2)
    # keep the fiat field's frozen state in sync with the BTC amount field
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(100)
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 3, 3)
    self.save_button = EnterButton(_("Save"), self.do_save_invoice)
    self.send_button = EnterButton(_("Pay") + "...", self.do_pay)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.save_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 4)
    self.amount_e.shortcut.connect(self.spend_max)
    def reset_max(text):
        # manually editing any amount cancels "Max" mode;
        # `enable` is kept for the commented-out setEnabled call below
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        #self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    self.set_onchain(False)
    self.invoices_label = QLabel(_('Outgoing payments'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    # layout
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    hbox.addStretch(1)
    w = QWidget()
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
if run_hook('abort_send', self):
return
outputs = self.payto_e.get_outputs(True)
if not outputs:
return
make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
coins=self.get_coins(),
outputs=outputs,
fee=fee_est,
is_sweep=False)
try:
try:
tx = make_tx(None)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
# Check if we had enough funds excluding fees,
# if so, still provide opportunity to set lower fees.
tx = make_tx(0)
except (MultipleSpendMaxTxOutputs, NotEnoughFunds) as e:
self.max_button.setChecked(False)
self.show_error(str(e))
return
self.max_button.setChecked(True)
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
# show tooltip explaining max amount
mining_fee = tx.get_fee()
mining_fee_str = self.format_amount_and_units(mining_fee)
msg = _("Mining fee: {} (can be adjusted on next screen)").format(mining_fee_str)
if x_fee_amount:
twofactor_fee_str = self.format_amount_and_units(x_fee_amount)
msg += "\n" + _("2fa fee: {} (for the next batch of transactions)").format(twofactor_fee_str)
frozen_bal = self.get_frozen_balance_str()
if frozen_bal:
msg += "\n" + _("Some coins are frozen: {} (can be unfrozen in the Addresses or in the Coins tab)").format(frozen_bal)
QToolTip.showText(self.max_button.mapToGlobal(QPoint(0, 0)), msg)
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
    @protected
    def protect(self, func, args, password):
        # Generic password-protected invocation helper: the @protected
        # decorator obtains the wallet password (prompting if needed) and
        # supplies it as `password`; `func` is then called with *args plus
        # the password appended as the last positional argument.
        return func(*args, password)
def read_outputs(self) -> List[PartialTxOutput]:
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
outputs = self.payto_e.get_outputs(self.max_button.isChecked())
return outputs
def check_send_tab_onchain_outputs_and_show_errors(self, outputs: List[PartialTxOutput]) -> bool:
"""Returns whether there are errors with outputs.
Also shows error dialog to user if so.
"""
if not outputs:
self.show_error(_('No outputs'))
return True
for o in outputs:
if o.scriptpubkey is None:
self.show_error(_('Qtum Address is None'))
return True
if o.value is None:
self.show_error(_('Invalid Amount'))
return True
return False # no errors
    def check_send_tab_payto_line_and_show_errors(self) -> bool:
        """Returns whether there are errors.
        Also shows error dialog to user if so.
        """
        pr = self.payment_request
        if pr:
            if pr.has_expired():
                self.show_error(_('Payment request has expired'))
                return True
        if not pr:
            # Free-form 'Pay to' text: surface parse errors collected by the editor widget.
            errors = self.payto_e.get_errors()
            if errors:
                if len(errors) == 1 and not errors[0].is_multiline:
                    err = errors[0]
                    self.show_warning(_("Failed to parse 'Pay to' line") + ":\n" +
                                      f"{err.line_content[:40]}...\n\n"
                                      f"{err.exc!r}")
                else:
                    self.show_warning(_("Invalid Lines found:") + "\n\n" +
                                      '\n'.join([_("Line #") +
                                                 f"{err.idx+1}: {err.line_content[:40]}... ({err.exc!r})"
                                                 for err in errors]))
                return True
            # Alias (e.g. OpenAlias) that failed DNSSEC validation: warn and let the user decide.
            if self.payto_e.is_alias and self.payto_e.validated is False:
                alias = self.payto_e.toPlainText()
                msg = _('WARNING: the alias "{}" could not be validated via an additional '
                        'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
                msg += _('Do you wish to continue?')
                if not self.question(msg):
                    return True
        return False  # no errors
    def pay_lightning_invoice(self, invoice: str, *, amount_msat: Optional[int]):
        """Confirm with the user, then pay a lightning invoice.

        The actual payment runs as a coroutine on the network's asyncio loop,
        scheduled from the wallet's worker thread (not the GUI thread).
        """
        if amount_msat is None:
            raise Exception("missing amount for LN invoice")
        amount_sat = Decimal(amount_msat) / 1000
        # FIXME this is currently lying to user as we truncate to satoshis
        msg = _("Pay lightning invoice?") + '\n\n' + _("This will send {}?").format(self.format_amount_and_units(amount_sat))
        if not self.question(msg):
            return
        # persist the invoice before the asynchronous payment starts
        self.save_pending_invoice()
        def task():
            coro = self.wallet.lnworker.pay_invoice(invoice, amount_msat=amount_msat, attempts=LN_NUM_PAYMENT_ATTEMPTS)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            return fut.result()
        self.wallet.thread.add(task)
def on_request_status(self, wallet, key, status):
if wallet != self.wallet:
return
req = self.wallet.receive_requests.get(key)
if req is None:
return
if status == PR_PAID:
self.notify(_('Payment received') + '\n' + key)
self.need_update.set()
else:
self.request_list.update_item(key, req)
def on_invoice_status(self, wallet, key):
if wallet != self.wallet:
return
invoice = self.wallet.get_invoice(key)
if invoice is None:
return
status = self.wallet.get_invoice_status(invoice)
if status == PR_PAID:
self.invoice_list.update()
else:
self.invoice_list.update_item(key, invoice)
def on_payment_succeeded(self, wallet, key):
description = self.wallet.get_label(key)
self.notify(_('Payment succeeded') + '\n\n' + description)
self.need_update.set()
    def on_payment_failed(self, wallet, key, reason):
        # Callback: show why an outgoing payment failed.
        # `reason` is concatenated below, so it is expected to be a str.
        self.show_error(_('Payment failed') + '\n\n' + reason)
    def read_invoice(self):
        """Build an invoice object from the current state of the send tab.

        Returns an LNInvoice (lightning mode) or a wallet-created onchain
        invoice, or None after showing an error if the tab contents are
        invalid.
        """
        if self.check_send_tab_payto_line_and_show_errors():
            return
        if not self._is_onchain:
            # lightning path: payto field must contain a bech32 LN invoice
            invoice_str = self.payto_e.lightning_invoice
            if not invoice_str:
                return
            if not self.wallet.has_lightning():
                self.show_error(_('Lightning is disabled'))
                return
            invoice = LNInvoice.from_bech32(invoice_str)
            if invoice.get_amount_msat() is None:
                # amountless invoice: take the amount from the amount field
                amount_sat = self.amount_e.get_amount()
                if amount_sat:
                    invoice.amount_msat = int(amount_sat * 1000)
                else:
                    self.show_error(_('No amount'))
                    return
            return invoice
        else:
            outputs = self.read_outputs()
            if self.check_send_tab_onchain_outputs_and_show_errors(outputs):
                return
            message = self.message_e.text()
            return self.wallet.create_invoice(
                outputs=outputs,
                message=message,
                pr=self.payment_request,
                URI=self.payto_URI)
def do_save_invoice(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.save_pending_invoice()
def save_pending_invoice(self):
if not self.pending_invoice:
return
self.do_clear()
self.wallet.save_invoice(self.pending_invoice)
self.invoice_list.update()
self.pending_invoice = None
def do_pay(self):
self.pending_invoice = self.read_invoice()
if not self.pending_invoice:
return
self.do_pay_invoice(self.pending_invoice)
def pay_multiple_invoices(self, invoices):
outputs = []
for invoice in invoices:
outputs += invoice.outputs
self.pay_onchain_dialog(self.get_coins(), outputs)
def do_pay_invoice(self, invoice: 'Invoice'):
if invoice.type == PR_TYPE_LN:
assert isinstance(invoice, LNInvoice)
self.pay_lightning_invoice(invoice.invoice, amount_msat=invoice.get_amount_msat())
elif invoice.type == PR_TYPE_ONCHAIN:
assert isinstance(invoice, OnchainInvoice)
self.pay_onchain_dialog(self.get_coins(), invoice.outputs)
else:
raise Exception('unknown invoice type')
def get_coins(self, *, nonlocal_only=False) -> Sequence[PartialTxInput]:
coins = self.get_manually_selected_coins()
if coins is not None:
return coins
else:
return self.wallet.get_spendable_coins(None, nonlocal_only=nonlocal_only)
    def get_manually_selected_coins(self) -> Optional[Sequence[PartialTxInput]]:
        """Return a list of selected coins or None.
        Note: None means selection is not being used,
        while an empty sequence means the user specifically selected that.
        """
        # Delegates to the coin-control selection held by the Coins tab.
        return self.utxo_list.get_spend_list()
def get_text_not_enough_funds_mentioning_frozen(self) -> str:
text = _("Not enough funds")
frozen_str = self.get_frozen_balance_str()
if frozen_str:
text += " ({} {})".format(
frozen_str, _("are frozen")
)
return text
def get_frozen_balance_str(self) -> Optional[str]:
frozen_bal = sum(self.wallet.get_frozen_balance())
if not frozen_bal:
return None
return self.format_amount_and_units(frozen_bal)
    def pay_onchain_dialog(
            self, inputs: Sequence[PartialTxInput],
            outputs: List[PartialTxOutput], *,
            external_keypairs=None) -> None:
        """Run the fee-selection / confirmation flow for an onchain payment
        and, if the user confirms, sign and broadcast the transaction.

        external_keypairs: when set, the tx is a sweep signed directly with
        those keys instead of the wallet's keystore.
        """
        # trustedcoin requires this
        if run_hook('abort_send', self):
            return
        is_sweep = bool(external_keypairs)
        make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(
            coins=inputs,
            outputs=outputs,
            fee=fee_est,
            is_sweep=is_sweep)
        output_values = [x.value for x in outputs]
        # at most one output may be "spend max" ('!')
        if output_values.count('!') > 1:
            self.show_error(_("More than one output set to spend max"))
            return
        output_value = '!' if '!' in output_values else sum(output_values)
        conf_dlg = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=is_sweep)
        if conf_dlg.not_enough_funds:
            # Check if we had enough funds excluding fees,
            # if so, still provide opportunity to set lower fees.
            if not conf_dlg.have_enough_funds_assuming_zero_fees():
                text = self.get_text_not_enough_funds_mentioning_frozen()
                self.show_message(text)
                return
        # shortcut to advanced preview (after "enough funds" check!)
        if self.config.get('advanced_preview'):
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs)
            preview_dlg.show()
            return
        cancelled, is_send, password, tx = conf_dlg.run()
        if cancelled:
            return
        if is_send:
            # persist the invoice before signing/broadcasting
            self.save_pending_invoice()
            def sign_done(success):
                if success:
                    self.broadcast_or_show(tx)
            self.sign_tx_with_password(tx, callback=sign_done, password=password,
                                       external_keypairs=external_keypairs)
        else:
            # user chose "preview" in the confirm dialog
            preview_dlg = PreviewTxDialog(
                window=self,
                make_tx=make_tx,
                external_keypairs=external_keypairs)
            preview_dlg.show()
def preview_tx_dialog(self, *, make_tx, external_keypairs=None):
d = PreviewTxDialog(make_tx=make_tx, external_keypairs=external_keypairs,
window=self)
d.show()
    def broadcast_or_show(self, tx: Transaction, * , broadcast_done=None):
        """Broadcast a complete tx; otherwise (incomplete tx or no network)
        fall back to showing the transaction dialog.

        broadcast_done: optional callable invoked with the tx after the
        broadcast has been initiated.
        """
        if tx is None:
            self.show_error("tx is None")
            return
        if not tx.is_complete():
            # not fully signed: let the user inspect/sign it instead
            self.show_transaction(tx)
            return
        if not self.network:
            self.show_error(_("You can't broadcast a transaction without a live network connection."))
            self.show_transaction(tx)
            return
        self.broadcast_transaction(tx)
        if broadcast_done:
            broadcast_done(tx)
    @protected
    def sign_tx(self, tx, *, callback, external_keypairs, password):
        # @protected obtains the wallet password (prompting if needed) and
        # passes it as `password`; actual work is in sign_tx_with_password.
        self.sign_tx_with_password(tx, callback=callback, password=password, external_keypairs=external_keypairs)
    def sign_tx_with_password(self, tx: PartialTransaction, *, callback, password, external_keypairs=None):
        '''Sign the transaction in a separate thread. When done, calls
        the callback with a success code of True or False.
        '''
        def on_success(result):
            callback(True)
        def on_failure(exc_info):
            self.on_error(exc_info)
            callback(False)
        # plugins (e.g. trustedcoin) may wrap the success callback
        on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
        if external_keypairs:
            # can sign directly
            task = partial(tx.sign, external_keypairs)
        else:
            task = partial(self.wallet.sign_transaction, tx, password)
        msg = _('Signing transaction...')
        WaitingDialog(self, msg, task, on_success, on_failure)
    def broadcast_transaction(self, tx: Transaction):
        """Broadcast tx on a background thread behind a waiting dialog,
        then report success/failure back on the GUI thread."""
        def broadcast_thread():
            # non-GUI thread
            pr = self.payment_request
            if pr and pr.has_expired():
                self.payment_request = None
                return False, _("Invoice has expired")
            try:
                self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
            except TxBroadcastError as e:
                return False, e.get_message_for_gui()
            except BestEffortRequestFailed as e:
                return False, repr(e)
            # success
            txid = tx.txid()
            if pr:
                # BIP70 flow: send the payment message and wait for the ACK
                self.payment_request = None
                refund_address = self.wallet.get_receiving_address()
                coro = pr.send_payment_and_receive_paymentack(tx.serialize(), refund_address)
                fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
                ack_status, ack_msg = fut.result(timeout=20)
                self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
            return True, txid
        # Capture current TL window; override might be removed on return
        parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
        def broadcast_done(result):
            # GUI thread
            if result:
                success, msg = result
                if success:
                    parent.show_message(_('Payment sent.') + '\n' + msg)
                    self.invoice_list.update()
                else:
                    msg = msg or ''
                    parent.show_error(msg)
        WaitingDialog(self, _('Broadcasting transaction...'),
                      broadcast_thread, broadcast_done, self.on_error)
def mktx_for_open_channel(self, funding_sat):
coins = self.get_coins(nonlocal_only=True)
make_tx = lambda fee_est: self.wallet.lnworker.mktx_for_open_channel(coins=coins,
funding_sat=funding_sat,
fee_est=fee_est)
return make_tx
    def open_channel(self, connect_str, funding_sat, push_amt):
        """Open a lightning channel to the peer in `connect_str`, funding it
        through the onchain confirm-tx flow."""
        try:
            extract_nodeid(connect_str)
        except ConnStringFormatError as e:
            self.main_window.show_error(str(e))
            return
        # use ConfirmTxDialog
        # we need to know the fee before we broadcast, because the txid is required
        make_tx = self.mktx_for_open_channel(funding_sat)
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=funding_sat, is_sweep=False)
        # disable preview button because the user must not broadcast tx before establishment_flow
        d.preview_button.setEnabled(False)
        cancelled, is_send, password, funding_tx = d.run()
        if not is_send:
            return
        if cancelled:
            return
        # read funding_sat from tx; converts '!' to int value
        funding_sat = funding_tx.output_value_for_address(ln_dummy_address())
        def task():
            # runs off the GUI thread (WaitingDialog)
            return self.wallet.lnworker.open_channel(connect_str=connect_str,
                                                     funding_tx=funding_tx,
                                                     funding_sat=funding_sat,
                                                     push_amt_sat=push_amt,
                                                     password=password)
        def on_success(args):
            chan, funding_tx = args
            n = chan.constraints.funding_txn_minimum_depth
            message = '\n'.join([
                _('Channel established.'),
                _('Remote peer ID') + ':' + chan.node_id.hex(),
                _('This channel will be usable after {} confirmations').format(n)
            ])
            if not funding_tx.is_complete():
                message += '\n\n' + _('Please sign and broadcast the funding transaction')
            self.show_message(message)
            if not funding_tx.is_complete():
                self.show_transaction(funding_tx)
        def on_failure(exc_info):
            type_, e, traceback = exc_info
            self.show_error(_('Could not open channel: {}').format(repr(e)))
        WaitingDialog(self, _('Opening channel...'), task, on_success, on_failure)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b: bool) -> None:
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoices(self, keys):
for key in keys:
self.wallet.delete_invoice(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
if not pr:
return
key = pr.get_id()
invoice = self.wallet.get_invoice(key)
if invoice and self.wallet.get_invoice_status(invoice) == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setAmount(pr.get_amount())
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
pr = self.payment_request
if not pr:
return
self.show_message(str(pr.error))
self.payment_request = None
self.do_clear()
def on_pr(self, request: 'paymentrequest.PaymentRequest'):
self.set_onchain(True)
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def parse_lightning_invoice(self, invoice):
"""Parse ln invoice, and prepare the send tab for it."""
try:
lnaddr = lndecode(invoice, expected_hrp=constants.net.SEGWIT_HRP)
except Exception as e:
raise LnDecodeException(e) from e
pubkey = lnaddr.pubkey.serialize().hex()
for k,v in lnaddr.tags:
if k == 'd':
description = v
break
else:
description = ''
self.payto_e.setFrozen(True)
self.payto_e.setText(pubkey)
self.message_e.setText(description)
if lnaddr.get_amount_sat() is not None:
self.amount_e.setAmount(lnaddr.get_amount_sat())
#self.amount_e.textEdited.emit("")
self.set_onchain(False)
    def set_onchain(self, b):
        # Track whether the send tab is in onchain mode; the Max button only
        # applies to onchain payments, so it follows the flag.
        self._is_onchain = b
        self.max_button.setEnabled(b)
    def pay_to_URI(self, URI):
        """Fill the send tab from a BIP21-style payment URI.

        If the URI carries an 'r' parameter (or a signed name), a payment
        request will be fetched asynchronously and self.on_pr is invoked as
        the callback.
        """
        if not URI:
            return
        try:
            out = util.parse_URI(URI, self.on_pr)
        except InvalidBitcoinURI as e:
            self.show_error(_("Error parsing URI") + f":\n{e}")
            return
        self.show_send_tab()
        self.payto_URI = out
        r = out.get('r')
        sig = out.get('sig')
        name = out.get('name')
        if r or (name and sig):
            # payment request is being fetched in the background; see on_pr
            self.prepare_for_payment_request()
            return
        address = out.get('address')
        amount = out.get('amount')
        label = out.get('label')
        message = out.get('message')
        # use label as description (not BIP21 compliant)
        if label and not message:
            message = label
        if address:
            self.payto_e.setText(address)
        if message:
            self.message_e.setText(message)
        if amount:
            self.amount_e.setAmount(amount)
            self.amount_e.textEdited.emit("")
def do_clear(self):
self.max_button.setChecked(False)
self.payment_request = None
self.payto_URI = None
self.payto_e.is_pr = False
self.set_onchain(False)
for e in [self.payto_e, self.message_e, self.amount_e]:
e.setText('')
e.setFrozen(False)
self.update_status()
run_hook('do_clear', self)
    def set_frozen_state_of_addresses(self, addrs, freeze: bool):
        # Delegate to the wallet, then refresh both views showing frozen state.
        self.wallet.set_frozen_state_of_addresses(addrs, freeze)
        self.address_list.update()
        self.utxo_list.update()
    def set_frozen_state_of_coins(self, utxos: Sequence[PartialTxInput], freeze: bool):
        # Delegate to the wallet, then refresh the Coins tab.
        self.wallet.set_frozen_state_of_coins(utxos, freeze)
        self.utxo_list.update()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
#vbox.setContentsMargins(0, 0, 0, 0)
#vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
tab = self.create_list_tab(l, toolbar)
toolbar_shown = bool(self.config.get('show_toolbar_addresses', False))
l.show_toolbar(toolbar_shown)
return tab
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = UTXOList(self)
return self.create_list_tab(self.utxo_list)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if not self.question(_("Do you want to remove {} from your wallet?").format(addr)):
return
try:
self.wallet.delete_address(addr)
except UserFacingException as e:
self.show_error(str(e))
else:
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
    def set_contact(self, label, address):
        # Add or update a contact. Returns True on success, False (after
        # showing an error) when `address` is not a valid address.
        if not is_address(address):
            self.show_error(_('Invalid Address'))
            self.contact_list.update() # Displays original unchanged value
            return False
        self.contacts[address] = ('address', label)
        self.contact_list.update()
        self.history_list.update()
        self.update_completions()
        return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_onchain_invoice(self, invoice: OnchainInvoice):
amount_str = self.format_amount(invoice.amount_sat) + ' ' + self.base_unit()
d = WindowModalDialog(self, _("Onchain Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
grid.addWidget(QLabel(amount_str), 1, 1)
if len(invoice.outputs) == 1:
grid.addWidget(QLabel(_("Address") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.get_address()), 2, 1)
else:
outputs_str = '\n'.join(map(lambda x: x.address + ' : ' + self.format_amount(x.value)+ self.base_unit(), invoice.outputs))
grid.addWidget(QLabel(_("Outputs") + ':'), 2, 0)
grid.addWidget(QLabel(outputs_str), 2, 1)
grid.addWidget(QLabel(_("Description") + ':'), 3, 0)
grid.addWidget(QLabel(invoice.message), 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.exp + invoice.time)), 4, 1)
if invoice.bip70:
pr = paymentrequest.PaymentRequest(bytes.fromhex(invoice.bip70))
pr.verify(self.contacts)
grid.addWidget(QLabel(_("Requestor") + ':'), 5, 0)
grid.addWidget(QLabel(pr.get_requestor()), 5, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 6, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 6, 1)
def do_export():
key = pr.get_id()
name = str(key) + '.bip70'
fn = getSaveFileName(
parent=self,
title=_("Save invoice to file"),
filename=name,
filter="*.bip70",
config=self.config,
)
if not fn:
return
with open(fn, 'wb') as f:
data = f.write(pr.raw)
self.show_message(_('BIP70 invoice saved as {}').format(fn))
exportButton = EnterButton(_('Export'), do_export)
buttons = Buttons(exportButton, CloseButton(d))
else:
buttons = Buttons(CloseButton(d))
vbox.addLayout(grid)
vbox.addLayout(buttons)
d.exec_()
def show_lightning_invoice(self, invoice: LNInvoice):
lnaddr = lndecode(invoice.invoice, expected_hrp=constants.net.SEGWIT_HRP)
d = WindowModalDialog(self, _("Lightning Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Node ID") + ':'), 0, 0)
grid.addWidget(QLabel(lnaddr.pubkey.serialize().hex()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
amount_str = self.format_amount(invoice.get_amount_sat()) + ' ' + self.base_unit()
grid.addWidget(QLabel(amount_str), 1, 1)
grid.addWidget(QLabel(_("Description") + ':'), 2, 0)
grid.addWidget(QLabel(invoice.message), 2, 1)
grid.addWidget(QLabel(_("Hash") + ':'), 3, 0)
payhash_e = ButtonsLineEdit(lnaddr.paymenthash.hex())
payhash_e.addCopyButton(self.app)
payhash_e.setReadOnly(True)
vbox.addWidget(payhash_e)
grid.addWidget(payhash_e, 3, 1)
if invoice.exp:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(invoice.time + invoice.exp)), 4, 1)
vbox.addLayout(grid)
invoice_e = ShowQRTextEdit(config=self.config)
invoice_e.addCopyButton(self.app)
invoice_e.setText(invoice.invoice)
vbox.addWidget(invoice_e)
vbox.addLayout(Buttons(CloseButton(d),))
d.exec_()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
    def update_console(self):
        """(Re)populate the console namespace with wallet/network objects and
        wrapped command-line commands."""
        console = self.console
        console.history = self.wallet.db.get("qt-console-history", [])
        console.history_index = len(console.history)
        console.updateNamespace({
            'wallet': self.wallet,
            'network': self.network,
            'plugins': self.gui_object.plugins,
            'window': self,
            'config': self.config,
            'electrum': electrum,
            'daemon': self.gui_object.daemon,
            'util': util,
            'bitcoin': bitcoin,
            'lnutil': lnutil,
        })
        c = commands.Commands(
            config=self.config,
            daemon=self.gui_object.daemon,
            network=self.network,
            callback=lambda: self.console.set_json(True))
        methods = {}
        def mkfunc(f, method):
            # binds `method` now (avoids the late-binding closure pitfall in
            # the loop below); each wrapper injects the wallet and a
            # password-prompt callback into the command invocation
            return lambda *args, **kwargs: f(method,
                                             args,
                                             self.password_dialog,
                                             **{**kwargs, 'wallet': self.wallet})
        for m in dir(c):
            if m[0]=='_' or m in ['network','wallet','config']: continue
            methods[m] = mkfunc(c._run, m)
        console.updateNamespace(methods)
    def create_status_bar(self):
        """Build the main window's status bar: balance label, search box,
        update-check button, and the password/preferences/seed/lightning/
        network buttons (the last two only when applicable)."""
        sb = QStatusBar()
        sb.setFixedHeight(35)
        self.balance_label = QLabel("Loading wallet...")
        self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
        sb.addWidget(self.balance_label)
        self.search_box = QLineEdit()
        self.search_box.textChanged.connect(self.do_search)
        self.search_box.hide()
        sb.addPermanentWidget(self.search_box)
        self.update_check_button = QPushButton("")
        self.update_check_button.setFlat(True)
        self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
        self.update_check_button.setIcon(read_QIcon("update.png"))
        self.update_check_button.hide()
        sb.addPermanentWidget(self.update_check_button)
        self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
        sb.addPermanentWidget(self.password_button)
        sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
        self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
        sb.addPermanentWidget(self.seed_button)
        # lightning button only for lightning-capable wallets with a network
        self.lightning_button = None
        if self.wallet.has_lightning() and self.network:
            self.lightning_button = StatusBarButton(read_QIcon("lightning_disconnected.png"), _("Lightning Network"), self.gui_object.show_lightning_dialog)
            sb.addPermanentWidget(self.lightning_button)
            self.update_lightning_icon()
        # network status button only when a network is available
        self.status_button = None
        if self.network:
            self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), self.gui_object.show_network_dialog)
            sb.addPermanentWidget(self.status_button)
        run_hook('create_status_bar', sb)
        self.setStatusBar(sb)
    def create_coincontrol_statusbar(self):
        """Build the (initially hidden) coin-control status bar shown while
        a manual coin selection is active."""
        self.coincontrol_sb = sb = QStatusBar()
        sb.setSizeGripEnabled(False)
        #sb.setFixedHeight(3 * char_width_in_lineedit())
        sb.setStyleSheet('QStatusBar::item {border: None;} '
                         + ColorScheme.GREEN.as_stylesheet(True))
        self.coincontrol_label = QLabel()
        self.coincontrol_label.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Preferred)
        self.coincontrol_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
        sb.addWidget(self.coincontrol_label)
        # clears the manual selection in the Coins tab
        clear_cc_button = EnterButton(_('Reset'), lambda: self.utxo_list.set_spend_list(None))
        clear_cc_button.setStyleSheet("margin-right: 5px;")
        sb.addPermanentWidget(clear_cc_button)
        sb.setVisible(False)
        return sb
def set_coincontrol_msg(self, msg: Optional[str]) -> None:
if not msg:
self.coincontrol_label.setText("")
self.coincontrol_sb.setVisible(False)
return
self.coincontrol_label.setText(msg)
self.coincontrol_sb.setVisible(True)
    def update_lightning_icon(self):
        """Update the status-bar lightning button icon/text to reflect the
        gossip sync progress."""
        if self.lightning_button is None:
            return
        if self.network.lngossip is None:
            return
        # display colorful lightning icon to signal connection
        self.lightning_button.setIcon(read_QIcon("lightning.png"))
        cur, total, progress_percent = self.network.lngossip.get_sync_progress_estimate()
        # self.logger.debug(f"updating lngossip sync progress estimate: cur={cur}, total={total}")
        progress_str = "??%"
        if progress_percent is not None:
            progress_str = f"{progress_percent}%"
        if progress_percent and progress_percent >= 100:
            # fully synced: icon only, no percentage text
            self.lightning_button.setMaximumWidth(25)
            self.lightning_button.setText('')
            self.lightning_button.setToolTip(_("The Lightning Network graph is fully synced."))
        else:
            self.lightning_button.setMaximumWidth(25 + 5 * char_width_in_lineedit())
            self.lightning_button.setText(progress_str)
            self.lightning_button.setToolTip(_("The Lightning Network graph is syncing...\n"
                                               "Payments are more likely to succeed with a more complete graph."))
def update_lock_icon(self):
icon = read_QIcon("lock.png") if self.wallet.has_password() else read_QIcon("unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
    def change_password_dialog(self):
        """Let the user change (or disable) the wallet password.

        Hardware wallets that encrypt storage with an xpub-derived password
        use a dedicated dialog/flow; software wallets use the standard
        old/new password dialog.
        """
        from electrum.storage import StorageEncryptionVersion
        if self.wallet.get_available_storage_encryption_version() == StorageEncryptionVersion.XPUB_PASSWORD:
            from .password_dialog import ChangePasswordDialogForHW
            d = ChangePasswordDialogForHW(self, self.wallet)
            ok, encrypt_file = d.run()
            if not ok:
                return
            try:
                # derive the storage-encryption password from the device
                hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
            except UserCancelled:
                return
            except BaseException as e:
                self.logger.exception('')
                self.show_error(repr(e))
                return
            old_password = hw_dev_pw if self.wallet.has_password() else None
            new_password = hw_dev_pw if encrypt_file else None
        else:
            from .password_dialog import ChangePasswordDialogForSW
            d = ChangePasswordDialogForSW(self, self.wallet)
            ok, old_password, new_password, encrypt_file = d.run()
            if not ok:
                return
        try:
            self.wallet.update_password(old_password, new_password, encrypt_storage=encrypt_file)
        except InvalidPassword as e:
            self.show_error(str(e))
            return
        except BaseException:
            self.logger.exception('Failed to update password')
            self.show_error(_('Failed to update password'))
            return
        msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
        self.show_message(msg, title=_("Success"))
        self.update_lock_icon()
def toggle_search(self):
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
    def new_contact_dialog(self):
        """Modal dialog asking for a new contact's address and name."""
        d = WindowModalDialog(self, _("New Contact"))
        vbox = QVBoxLayout(d)
        vbox.addWidget(QLabel(_('New Contact') + ':'))
        grid = QGridLayout()
        line1 = QLineEdit()
        line1.setFixedWidth(32 * char_width_in_lineedit())
        line2 = QLineEdit()
        line2.setFixedWidth(32 * char_width_in_lineedit())
        grid.addWidget(QLabel(_("Address")), 1, 0)
        grid.addWidget(line1, 1, 1)
        grid.addWidget(QLabel(_("Name")), 2, 0)
        grid.addWidget(line2, 2, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if d.exec_():
            # set_contact(label, address): line2 holds the name, line1 the address
            self.set_contact(line2.text(), line1.text())
    def show_wallet_info(self):
        """Modal "Wallet Information" dialog: wallet/keystore metadata,
        lightning availability, and (for deterministic wallets) the master
        public key(s) and derivation path per keystore."""
        dialog = WindowModalDialog(self, _("Wallet Information"))
        dialog.setMinimumSize(500, 100)
        vbox = QVBoxLayout()
        wallet_type = self.wallet.db.get('wallet_type', '')
        if self.wallet.is_watching_only():
            wallet_type += ' [{}]'.format(_('watching-only'))
        seed_available = _('True') if self.wallet.has_seed() else _('False')
        keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
        grid = QGridLayout()
        basename = os.path.basename(self.wallet.storage.path)
        grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
        grid.addWidget(QLabel(basename), 0, 1)
        grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
        grid.addWidget(QLabel(wallet_type), 1, 1)
        grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
        grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
        grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
        grid.addWidget(QLabel(str(seed_available)), 3, 1)
        if len(keystore_types) <= 1:
            grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
            ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
            grid.addWidget(QLabel(ks_type), 4, 1)
        # lightning
        grid.addWidget(QLabel(_('Lightning') + ':'), 5, 0)
        if self.wallet.can_have_lightning():
            grid.addWidget(QLabel(_('Enabled')), 5, 1)
            local_nodeid = QLabel(self.wallet.lnworker.node_keypair.pubkey.hex())
            local_nodeid.setTextInteractionFlags(Qt.TextSelectableByMouse)
            grid.addWidget(QLabel(_('Lightning Node ID:')), 6, 0)
            grid.addWidget(local_nodeid, 6, 1, 1, 3)
        else:
            grid.addWidget(QLabel(_("Not available for this wallet.")), 5, 1)
            grid.addWidget(HelpButton(_("Lightning is currently restricted to HD wallets with p2wpkh addresses.")), 5, 2)
        vbox.addLayout(grid)
        # NOTE(review): labels_clayout is assigned but never read after
        # construction — looks like a leftover.
        labels_clayout = None
        if self.wallet.is_deterministic():
            keystores = self.wallet.get_keystores()
            ks_stack = QStackedWidget()
            def select_ks(index):
                ks_stack.setCurrentIndex(index)
            # only show the combobox in case multiple accounts are available
            if len(keystores) > 1:
                def label(idx, ks):
                    if isinstance(self.wallet, Multisig_Wallet) and hasattr(ks, 'label'):
                        return _("cosigner") + f' {idx+1}: {ks.get_type_text()} {ks.label}'
                    else:
                        return _("keystore") + f' {idx+1}'
                labels = [label(idx, ks) for idx, ks in enumerate(self.wallet.get_keystores())]
                on_click = lambda clayout: select_ks(clayout.selected_index())
                labels_clayout = ChoicesLayout(_("Select keystore"), labels, on_click)
                vbox.addLayout(labels_clayout.layout())
            # one stacked page per keystore: master public key + derivation path
            for ks in keystores:
                ks_w = QWidget()
                ks_vbox = QVBoxLayout()
                ks_vbox.setContentsMargins(0, 0, 0, 0)
                ks_w.setLayout(ks_vbox)
                mpk_text = ShowQRTextEdit(ks.get_master_public_key(), config=self.config)
                mpk_text.setMaximumHeight(150)
                mpk_text.addCopyButton(self.app)
                run_hook('show_xpub_button', mpk_text, ks)
                der_path_hbox = QHBoxLayout()
                der_path_hbox.setContentsMargins(0, 0, 0, 0)
                der_path_hbox.addWidget(QLabel(_("Derivation path") + ':'))
                der_path_text = QLabel(ks.get_derivation_prefix() or _("unknown"))
                der_path_text.setTextInteractionFlags(Qt.TextSelectableByMouse)
                der_path_hbox.addWidget(der_path_text)
                der_path_hbox.addStretch()
                ks_vbox.addWidget(QLabel(_("Master Public Key")))
                ks_vbox.addWidget(mpk_text)
                ks_vbox.addLayout(der_path_hbox)
                ks_stack.addWidget(ks_w)
            select_ks(0)
            vbox.addWidget(ks_stack)
        vbox.addStretch(1)
        btn_export_info = run_hook('wallet_info_buttons', self, dialog)
        btn_close = CloseButton(dialog)
        btns = Buttons(btn_export_info, btn_close)
        vbox.addLayout(btns)
        dialog.setLayout(vbox)
        dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
    @protected
    def _delete_wallet(self, password):
        """Delete the wallet file from disk (password-gated via @protected)
        and close this window, reporting success or failure to the user."""
        wallet_path = self.wallet.storage.path
        basename = os.path.basename(wallet_path)
        r = self.gui_object.daemon.delete_wallet(wallet_path)
        # close the window regardless of whether deletion succeeded
        self.close()
        if r:
            self.show_error(_("Wallet removed: {}").format(basename))
        else:
            self.show_error(_("Wallet file not found: {}").format(basename))
    @protected
    def show_seed_dialog(self, password):
        """Show the wallet seed and passphrase after password unlock.

        No-op (with a message) for wallets without a seed; any keystore
        error is reported to the user instead of raised.
        """
        if not self.wallet.has_seed():
            self.show_message(_('This wallet has no seed'))
            return
        keystore = self.wallet.get_keystore()
        try:
            seed = keystore.get_seed(password)
            passphrase = keystore.get_passphrase(password)
        except BaseException as e:
            self.show_error(repr(e))
            return
        from .seed_dialog import SeedDialog
        d = SeedDialog(self, seed, passphrase, config=self.config)
        d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None, *,
help_text=None, show_copy_text_btn=False):
if not data:
return
d = QRDialog(
data=data,
parent=parent or self,
title=title,
help_text=help_text,
show_copy_text_btn=show_copy_text_btn,
config=self.config,
)
d.exec_()
    @protected
    def show_private_key(self, address, password):
        """Show the private key (WIF) for *address* in a QR/text dialog.

        Requires the wallet password (@protected); export errors are shown
        to the user instead of raised.
        """
        if not address:
            return
        try:
            pk = self.wallet.export_private_key(address, password)
        except Exception as e:
            self.logger.exception('')
            self.show_message(repr(e))
            return
        # script type is recovered from the WIF-encoded key itself
        xtype = bitcoin.deserialize_privkey(pk)[0]
        d = WindowModalDialog(self, _("Private key"))
        d.setMinimumSize(600, 150)
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Address") + ': ' + address))
        vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
        vbox.addWidget(QLabel(_("Private key") + ':'))
        keys_e = ShowQRTextEdit(text=pk, config=self.config)
        keys_e.addCopyButton(self.app)
        vbox.addWidget(keys_e)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.setLayout(vbox)
        d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
    @protected
    def do_sign(self, address, message, signature, password):
        """Sign *message* with the key of *address* and put the base64
        signature into the *signature* widget.

        *address*/*message*/*signature* are the input widgets themselves;
        signing runs on the wallet thread so the GUI stays responsive.
        """
        address = address.text().strip()
        message = message.toPlainText().strip()
        if not bitcoin.is_address(address):
            self.show_message(_('Invalid Qtum address.'))
            return
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        if not self.wallet.is_mine(address):
            self.show_message(_('Address not in wallet.'))
            return
        txin_type = self.wallet.get_txin_type(address)
        # message signing is only defined for single-pubkey address types
        if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
            self.show_message(_('Cannot sign messages with this type of address:') + \
                              ' ' + txin_type + '\n\n' + self.msg_sign)
            return
        task = partial(self.wallet.sign_message, address, message, password)
        def show_signed_message(sig):
            try:
                signature.setText(base64.b64encode(sig).decode('ascii'))
            except RuntimeError:
                # (signature) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Qtum address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
    def sign_verify_message(self, address=''):
        """Open the Sign/Verify Message dialog, optionally pre-filled with
        *address*. Sign and Verify delegate to do_sign()/do_verify()."""
        d = WindowModalDialog(self, _('Sign/verify Message'))
        d.setMinimumSize(610, 290)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        # message row gets most of the vertical space
        layout.setRowStretch(2,3)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 2, 0)
        layout.addWidget(address_e, 2, 1)
        signature_e = QTextEdit()
        signature_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Signature')), 3, 0)
        layout.addWidget(signature_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Verify"))
        b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    def sign_pod(self, address=''):
        """Open the Sign POD (proof-of-delegation) dialog, optionally
        pre-filled with *address*. Signing delegates to do_sign_pod()."""
        d = WindowModalDialog(self, _('Sign POD'))
        d.setMinimumSize(450, 300)
        layout = QGridLayout(d)
        address_e = QLineEdit()
        address_e.setText(address)
        layout.addWidget(QLabel(_('Address')), 1, 0)
        layout.addWidget(address_e, 1, 1)
        staker_e = QLineEdit()
        layout.addWidget(QLabel(_('Staker')), 2, 0)
        layout.addWidget(staker_e, 2, 1)
        pod_e = QTextEdit()
        pod_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('POD')), 3, 0)
        layout.addWidget(pod_e, 3, 1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Sign"))
        b.clicked.connect(lambda: self.do_sign_pod(address_e, staker_e, pod_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
    def do_sign_pod(self, address_e, staker_e, pod_e):
        """Sign a proof-of-delegation: normalize the staker to a hex hash160
        and sign it with the delegating address via do_sign().

        Accepts the staker either as a raw hash160 hex string or as a
        base58 P2PKH address.
        """
        staker = staker_e.text().strip()
        if not is_hash160(staker):
            try:
                addr_type, staker = b58_address_to_hash160(staker)
            except BaseException:
                raise Exception('invalid staker address')
            if addr_type != constants.net.ADDRTYPE_P2PKH:
                raise Exception('invalid staker address')
            staker = staker.hex()
        # wrap the staker hash in a throwaway QTextEdit so we can reuse
        # do_sign(), which expects widget arguments
        message_e = QTextEdit()
        message_e.setText(staker)
        self.do_sign(address_e, message_e, pod_e)
    @protected
    def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
        """Decrypt the ciphertext in *encrypted_e* with the key for the
        pubkey in *pubkey_e*, writing the plaintext into *message_e*.

        Decryption runs on the wallet thread to keep the GUI responsive.
        """
        if self.wallet.is_watching_only():
            self.show_message(_('This is a watching-only wallet.'))
            return
        cyphertext = encrypted_e.toPlainText()
        task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
        def setText(text):
            try:
                message_e.setText(text.decode('utf-8'))
            except RuntimeError:
                # (message_e) wrapped C/C++ object has been deleted
                pass
        self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
self.logger.exception('Invalid Public key')
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
    def encrypt_message(self, address=''):
        """Open the Encrypt/Decrypt Message dialog; if *address* is given,
        its public key is pre-filled. Buttons delegate to
        do_encrypt()/do_decrypt()."""
        d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
        d.setMinimumSize(610, 490)
        layout = QGridLayout(d)
        message_e = QTextEdit()
        message_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Message')), 1, 0)
        layout.addWidget(message_e, 1, 1)
        # message row gets most of the vertical space
        layout.setRowStretch(2,3)
        pubkey_e = QLineEdit()
        if address:
            pubkey = self.wallet.get_public_key(address)
            pubkey_e.setText(pubkey)
        layout.addWidget(QLabel(_('Public key')), 2, 0)
        layout.addWidget(pubkey_e, 2, 1)
        encrypted_e = QTextEdit()
        encrypted_e.setAcceptRichText(False)
        layout.addWidget(QLabel(_('Encrypted')), 3, 0)
        layout.addWidget(encrypted_e, 3, 1)
        layout.setRowStretch(3,1)
        hbox = QHBoxLayout()
        b = QPushButton(_("Encrypt"))
        b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Decrypt"))
        b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
        hbox.addWidget(b)
        b = QPushButton(_("Close"))
        b.clicked.connect(d.accept)
        hbox.addWidget(b)
        layout.addLayout(hbox, 4, 1)
        d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, data: Union[str, bytes]) -> Union[None, 'PartialTransaction', 'Transaction']:
from electrum.transaction import tx_from_any
try:
return tx_from_any(data)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + repr(e))
return
def import_channel_backup(self, encrypted: str):
if not self.question('Import channel backup?'):
return
try:
self.wallet.lnbackups.import_channel_backup(encrypted)
except Exception as e:
self.show_error("failed to import backup" + '\n' + str(e))
return
    def read_tx_from_qrcode(self):
        """Scan a QR code with the configured camera and dispatch on its
        contents: payment URI, channel backup, or raw transaction."""
        from electrum import qrscanner
        try:
            data = qrscanner.scan_barcode(self.config.get_video_device())
        except BaseException as e:
            self.show_error(repr(e))
            return
        if not data:
            return
        # if the user scanned a payment URI ("qtum:" scheme)
        if str(data).startswith("qtum:"):
            self.pay_to_URI(data)
            return
        if data.startswith('channel_backup:'):
            self.import_channel_backup(data)
            return
        # else if the user scanned an offline signed tx
        tx = self.tx_from_text(data)
        if not tx:
            return
        self.show_transaction(tx)
    def read_tx_from_file(self) -> Optional[Transaction]:
        """Let the user pick a transaction file and parse its contents.

        Returns the parsed transaction, or None if the user cancelled,
        the file could not be read, or parsing failed (an error dialog
        is shown in the latter cases).
        """
        fileName = getOpenFileName(
            parent=self,
            title=_("Select your transaction file"),
            filter=TRANSACTION_FILE_EXTENSION_FILTER_ANY,
            config=self.config,
        )
        if not fileName:
            return
        try:
            # read as bytes; tx_from_text() accepts str or bytes
            with open(fileName, "rb") as f:
                file_content = f.read()  # type: Union[str, bytes]
        except (ValueError, IOError, os.error) as reason:
            self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason),
                               title=_("Unable to read file or no transaction found"))
            return
        return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(
parent=self,
title=_('Input raw transaction'),
header_layout=_("Transaction:"),
ok_label=_("Load transaction"),
config=self.config,
)
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_text_channel_backup(self):
text = text_dialog(
parent=self,
title=_('Input channel backup'),
header_layout=_("Channel Backup:"),
ok_label=_("Load backup"),
config=self.config,
)
if not text:
return
if text.startswith('channel_backup:'):
self.import_channel_backup(text)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
    def do_process_from_txid(self):
        """Prompt for a txid, fetch the raw transaction from the network
        and display it. Network errors are reported in a message box."""
        from electrum import transaction
        txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
        if ok and txid:
            txid = str(txid).strip()
            try:
                # blocking fetch; run on the network thread with a timeout
                raw_tx = self.network.run_from_another_thread(
                    self.network.get_transaction(txid, timeout=10))
            except UntrustedServerReturnedError as e:
                self.logger.info(f"Error getting transaction from network: {repr(e)}")
                self.show_message(_("Error getting transaction from network") + ":\n" + e.get_message_for_gui())
                return
            except Exception as e:
                self.show_message(_("Error getting transaction from network") + ":\n" + repr(e))
                return
            else:
                tx = transaction.Transaction(raw_tx)
                self.show_transaction(tx)
    @protected
    def export_privkeys_dialog(self, password):
        """Export all wallet private keys to a file.

        Opens a modal dialog; keys are derived in a background thread
        (one address every 0.1s) and streamed into the dialog via Qt
        signals. On OK, the collected keys are written with
        do_export_privkeys(). Password-gated via @protected.
        """
        if self.wallet.is_watching_only():
            self.show_message(_("This is a watching-only wallet"))
            return
        if isinstance(self.wallet, Multisig_Wallet):
            self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                              _('It cannot be "backed up" by simply exporting these private keys.'))
        d = WindowModalDialog(self, _('Private keys'))
        d.setMinimumSize(980, 300)
        vbox = QVBoxLayout(d)
        msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                              _("Exposing a single private key can compromise your entire wallet!"),
                              _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
        vbox.addWidget(QLabel(msg))
        e = QTextEdit()
        e.setReadOnly(True)
        vbox.addWidget(e)
        defaultname = 'electrum-private-keys.csv'
        select_msg = _('Select file to export your private keys to')
        hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
        vbox.addLayout(hbox)
        b = OkButton(d, _('Export'))
        # Export stays disabled until all keys have been computed.
        b.setEnabled(False)
        vbox.addLayout(Buttons(CancelButton(d), b))
        # Shared state between the worker thread and the GUI callbacks.
        private_keys = {}
        addresses = self.wallet.get_addresses()
        done = False
        cancelled = False
        def privkeys_thread():
            # Worker: derives one key per address, signalling progress.
            # Reads done/cancelled set by the GUI callbacks below.
            for addr in addresses:
                time.sleep(0.1)
                if done or cancelled:
                    break
                privkey = self.wallet.export_private_key(addr, password)
                private_keys[addr] = privkey
                self.computing_privkeys_signal.emit()
            if not cancelled:
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.emit()
        def show_privkeys():
            # GUI thread: display the full "address<TAB>key" listing.
            s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
            e.setText(s)
            b.setEnabled(True)
            self.show_privkeys_signal.disconnect()
            nonlocal done
            done = True
        def on_dialog_closed(*args):
            # Stop the worker and detach signals if closed mid-computation.
            nonlocal done
            nonlocal cancelled
            if not done:
                cancelled = True
                self.computing_privkeys_signal.disconnect()
                self.show_privkeys_signal.disconnect()
        self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
        self.show_privkeys_signal.connect(show_privkeys)
        d.finished.connect(on_dialog_closed)
        threading.Thread(target=privkeys_thread).start()
        if not d.exec_():
            done = True
            return
        filename = filename_e.text()
        if not filename:
            return
        try:
            self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
        except (IOError, os.error) as reason:
            txt = "\n".join([
                _("Electrum was unable to produce a private key-export."),
                str(reason)
            ])
            self.show_critical(txt, title=_("Unable to create csv"))
        except Exception as e:
            self.show_message(repr(e))
            return
        self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
with open(fileName, "w+") as f:
os.chmod(fileName, 0o600)
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), self.wallet.import_labels, on_import)
    def do_export_labels(self):
        """Export wallet labels via the shared metadata-export dialog."""
        export_meta_gui(self, _('labels'), self.wallet.export_labels)
    def import_invoices(self):
        """Import invoices from a file and refresh the invoice list."""
        import_meta_gui(self, _('invoices'), self.wallet.import_invoices, self.invoice_list.update)
    def export_invoices(self):
        """Export invoices via the shared metadata-export dialog."""
        export_meta_gui(self, _('invoices'), self.wallet.export_invoices)
    def import_requests(self):
        """Import payment requests from a file and refresh the request list."""
        import_meta_gui(self, _('requests'), self.wallet.import_requests, self.request_list.update)
    def export_requests(self):
        """Export payment requests via the shared metadata-export dialog."""
        export_meta_gui(self, _('requests'), self.wallet.export_requests)
    def import_contacts(self):
        """Import contacts from a file and refresh the contact list."""
        import_meta_gui(self, _('contacts'), self.contacts.import_file, self.contact_list.update)
    def export_contacts(self):
        """Export contacts via the shared metadata-export dialog."""
        export_meta_gui(self, _('contacts'), self.contacts.export_file)
    def sweep_key_dialog(self):
        """Sweep funds from externally-held private keys into this wallet.

        Prompts for one or more private keys and a destination address,
        prepares the sweep on the network thread, then hands off to the
        normal on-chain payment dialog with the external keypairs.
        """
        d = WindowModalDialog(self, title=_('Sweep private keys'))
        d.setMinimumSize(600, 300)
        vbox = QVBoxLayout(d)
        hbox_top = QHBoxLayout()
        hbox_top.addWidget(QLabel(_("Enter private keys:")))
        hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        vbox.addLayout(hbox_top)
        keys_e = ScanQRTextEdit(allow_multi=True, config=self.config)
        keys_e.setTabChangesFocus(True)
        vbox.addWidget(keys_e)
        # prefer an unused address as destination; fall back for wallet
        # types that have no receiving-address notion
        addresses = self.wallet.get_unused_addresses()
        if not addresses:
            try:
                addresses = self.wallet.get_receiving_addresses()
            except AttributeError:
                addresses = self.wallet.get_addresses()
        h, address_e = address_field(addresses)
        vbox.addLayout(h)
        vbox.addStretch(1)
        button = OkButton(d, _('Sweep'))
        vbox.addLayout(Buttons(CancelButton(d), button))
        # Sweep stays disabled until both inputs validate.
        button.setEnabled(False)
        def get_address():
            # returns the destination address, or None if invalid
            addr = str(address_e.text()).strip()
            if bitcoin.is_address(addr):
                return addr
        def get_pk(*, raise_on_error=False):
            text = str(keys_e.toPlainText())
            return keystore.get_private_keys(text, raise_on_error=raise_on_error)
        def on_edit():
            # re-validate on every keystroke; surface parse errors as tooltip
            valid_privkeys = False
            try:
                valid_privkeys = get_pk(raise_on_error=True) is not None
            except Exception as e:
                button.setToolTip(f'{_("Error")}: {repr(e)}')
            else:
                button.setToolTip('')
            button.setEnabled(get_address() is not None and valid_privkeys)
        on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
        keys_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_edit)
        address_e.textChanged.connect(on_address)
        on_address(str(address_e.text()))
        if not d.exec_():
            return
        # user pressed "sweep"
        addr = get_address()
        try:
            self.wallet.check_address_for_corruption(addr)
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            raise
        privkeys = get_pk()
        def on_success(result):
            coins, keypairs = result
            # '!' means "send max": sweep everything to the destination
            outputs = [PartialTxOutput.from_address_and_value(addr, value='!')]
            self.warn_if_watching_only()
            self.pay_onchain_dialog(coins, outputs, external_keypairs=keypairs)
        def on_failure(exc_info):
            self.on_error(exc_info)
        msg = _('Preparing sweep transaction...')
        task = lambda: self.network.run_from_another_thread(
            sweep_preparations(privkeys, self.network))
        WaitingDialog(self, msg, task, on_success, on_failure)
    def _do_import(self, title, header_layout, func):
        """Shared import flow: prompt for whitespace-separated entries,
        pass them to *func*, then report accepted and rejected inputs.

        *func* must return a (good_inputs, bad_inputs) pair where
        bad_inputs is an iterable of (key, error_message) pairs.
        """
        text = text_dialog(
            parent=self,
            title=title,
            header_layout=header_layout,
            ok_label=_('Import'),
            allow_multi=True,
            config=self.config,
        )
        if not text:
            return
        keys = str(text).split()
        good_inputs, bad_inputs = func(keys)
        if good_inputs:
            # cap the listing at 10 entries to keep the dialog readable
            msg = '\n'.join(good_inputs[:10])
            if len(good_inputs) > 10: msg += '\n...'
            self.show_message(_("The following addresses were added")
                              + f' ({len(good_inputs)}):\n' + msg)
        if bad_inputs:
            msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
            if len(bad_inputs) > 10: msg += '\n...'
            self.show_error(_("The following inputs could not be imported")
                            + f' ({len(bad_inputs)}):\n' + msg)
        self.address_list.update()
        self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
    @protected
    def do_import_privkey(self, password):
        """Prompt for private keys and import them into the wallet
        (password-gated via @protected)."""
        if not self.wallet.can_import_privkey():
            return
        title = _('Import private keys')
        header_layout = QHBoxLayout()
        header_layout.addWidget(QLabel(_("Enter private keys")+':'))
        header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
        self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
    def settings_dialog(self):
        """Open the settings dialog and apply side effects when it closes
        (fx refresh, plugin hook, restart warning)."""
        from .settings_dialog import SettingsDialog
        d = SettingsDialog(self, self.config)
        # let the dialog color the alias field while it is open
        self.alias_received_signal.connect(d.set_alias_color)
        d.exec_()
        self.alias_received_signal.disconnect(d.set_alias_color)
        if self.fx:
            self.fx.trigger_update()
        run_hook('close_settings_dialog')
        if d.need_restart:
            self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
    def closeEvent(self, event):
        """Qt close handler: run cleanup and accept the close."""
        # note that closeEvent is NOT called if the user quits with Ctrl-C
        self.clean_up()
        event.accept()
    def clean_up(self):
        """Tear down the window: stop threads, persist geometry/history,
        close the wallet and deregister from the GUI object.

        Idempotent — guarded by the _cleaned_up flag, since it can be
        reached from more than one shutdown path.
        """
        if self._cleaned_up:
            return
        self._cleaned_up = True
        if self.wallet.thread:
            self.wallet.thread.stop()
            self.wallet.thread = None
        util.unregister_callback(self.on_network)
        self.config.set_key("is_maximized", self.isMaximized())
        # only persist geometry when not maximized, so un-maximizing later
        # restores a sensible size
        if not self.isMaximized():
            g = self.geometry()
            self.wallet.db.put("winpos-qt", [g.left(),g.top(),
                                             g.width(),g.height()])
        # keep only the last 50 console history entries
        self.wallet.db.put("qt-console-history", self.console.history[-50:])
        if self.qr_window:
            self.qr_window.close()
        self.close_wallet()
        if self._update_check_thread:
            self._update_check_thread.exit()
            self._update_check_thread.wait()
        if self.tray:
            self.tray = None
        self.gui_object.timer.timeout.disconnect(self.timer_actions)
        self.gui_object.close_window(self)
    def plugins_dialog(self):
        """Show the plugins dialog: one checkbox row per available plugin,
        with an optional per-plugin settings widget and a help button."""
        self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
        plugins = self.gui_object.plugins
        vbox = QVBoxLayout(d)
        # plugins
        scroll = QScrollArea()
        scroll.setEnabled(True)
        scroll.setWidgetResizable(True)
        scroll.setMinimumSize(400,250)
        vbox.addWidget(scroll)
        w = QWidget()
        scroll.setWidget(w)
        w.setMinimumHeight(plugins.count() * 35)
        grid = QGridLayout()
        grid.setColumnStretch(0,1)
        w.setLayout(grid)
        # name -> settings widget, for plugins that expose one
        settings_widgets = {}
        def enable_settings_widget(p: Optional['BasePlugin'], name: str, i: int):
            widget = settings_widgets.get(name)  # type: Optional[QWidget]
            if widget and not p:
                # plugin got disabled, rm widget
                grid.removeWidget(widget)
                widget.setParent(None)
                settings_widgets.pop(name)
            elif widget is None and p and p.requires_settings() and p.is_enabled():
                # plugin got enabled, add widget
                widget = settings_widgets[name] = p.settings_widget(d)
                grid.addWidget(widget, i, 1)
        def do_toggle(cb, name, i):
            p = plugins.toggle(name)
            cb.setChecked(bool(p))
            enable_settings_widget(p, name, i)
            # note: all enabled plugins will receive this hook:
            run_hook('init_qt', self.gui_object)
        for i, descr in enumerate(plugins.descriptions.values()):
            full_name = descr['__name__']
            prefix, _separator, name = full_name.rpartition('.')
            p = plugins.get(name)
            if descr.get('registers_keystore'):
                continue
            try:
                cb = QCheckBox(descr['fullname'])
                plugin_is_loaded = p is not None
                # enabled if it can be loaded, or is loaded and user-disableable
                # (relies on `and` binding tighter than `or`)
                cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                              or plugin_is_loaded and p.can_user_disable())
                cb.setEnabled(cb_enabled)
                cb.setChecked(plugin_is_loaded and p.is_enabled())
                grid.addWidget(cb, i, 0)
                enable_settings_widget(p, name, i)
                cb.clicked.connect(partial(do_toggle, cb, name, i))
                msg = descr['description']
                if descr.get('requires'):
                    msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
                grid.addWidget(HelpButton(msg), i, 2)
            except Exception:
                # a broken plugin must not prevent the dialog from opening
                self.logger.exception(f"cannot display plugin {name}")
        grid.setRowStretch(len(plugins.descriptions.values()), 1)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
    def cpfp(self, parent_tx: Transaction, new_tx: PartialTransaction) -> None:
        """Open the Child-Pays-For-Parent dialog for *parent_tx*, letting
        the user pick the child fee, then build and show the CPFP tx.

        *new_tx* is a draft of the child transaction, used only for size
        and maximum-spendable estimation; the actual child is rebuilt by
        wallet.cpfp() with the chosen fee.
        """
        total_size = parent_tx.estimated_size() + new_tx.estimated_size()
        parent_txid = parent_tx.txid()
        assert parent_txid
        parent_fee = self.wallet.get_tx_fee(parent_txid)
        if parent_fee is None:
            self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
            return
        d = WindowModalDialog(self, _('Child Pays for Parent'))
        vbox = QVBoxLayout(d)
        msg = (
            "A CPFP is a transaction that sends an unconfirmed output back to "
            "yourself, with a high fee. The goal is to have miners confirm "
            "the parent transaction in order to get the fee attached to the "
            "child transaction.")
        vbox.addWidget(WWLabel(_(msg)))
        msg2 = ("The proposed fee is computed using your "
                "fee/kB settings, applied to the total size of both child and "
                "parent transactions. After you broadcast a CPFP transaction, "
                "it is normal to see a new unconfirmed transaction in your history.")
        vbox.addWidget(WWLabel(_(msg2)))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
        grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
        # max_fee: everything the child could spend (its full output value)
        max_fee = new_tx.output_value()
        grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
        output_amount = QLabel('')
        grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
        grid.addWidget(output_amount, 2, 1)
        fee_e = BTCAmountEdit(self.get_decimal_point)
        # FIXME with dyn fees, without estimates, there are all kinds of crashes here
        combined_fee = QLabel('')
        combined_feerate = QLabel('')
        def on_fee_edit(x):
            # recompute the derived labels whenever the child fee changes
            fee_for_child = fee_e.get_amount()
            if fee_for_child is None:
                return
            out_amt = max_fee - fee_for_child
            out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
            output_amount.setText(out_amt_str)
            comb_fee = parent_fee + fee_for_child
            comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
            combined_fee.setText(comb_fee_str)
            comb_feerate = comb_fee / total_size * 1000
            comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
            combined_feerate.setText(comb_feerate_str)
        fee_e.textChanged.connect(on_fee_edit)
        def get_child_fee_from_total_feerate(fee_per_kb):
            # child pays whatever is needed so that parent+child reach the
            # target feerate, clamped to what the child can actually spend
            fee = fee_per_kb * total_size / 1000 - parent_fee
            fee = min(max_fee, fee)
            fee = max(total_size, fee)  # pay at least 1 sat/byte for combined size
            return fee
        suggested_feerate = self.config.fee_per_kb()
        if suggested_feerate is None:
            # NOTE(review): the stray apostrophe in "Can't CPFP'" looks like a
            # typo in the translated source string — confirm before changing.
            self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
            return
        fee = get_child_fee_from_total_feerate(suggested_feerate)
        fee_e.setAmount(fee)
        grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
        grid.addWidget(fee_e, 3, 1)
        def on_rate(dyn, pos, fee_rate):
            fee = get_child_fee_from_total_feerate(fee_rate)
            fee_e.setAmount(fee)
        fee_slider = FeeSlider(self, self.config, on_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.update()
        grid.addWidget(fee_slider, 4, 1)
        grid.addWidget(fee_combo, 4, 2)
        grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
        grid.addWidget(combined_fee, 5, 1)
        grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
        grid.addWidget(combined_feerate, 6, 1)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        fee = fee_e.get_amount()
        if fee is None:
            return  # fee left empty, treat is as "cancel"
        if fee > max_fee:
            self.show_error(_('Max fee exceeded'))
            return
        # rebuild the child with the chosen fee; the draft new_tx is discarded
        new_tx = self.wallet.cpfp(parent_tx, fee)
        new_tx.set_rbf(True)
        self.show_transaction(new_tx)
    def bump_fee_dialog(self, tx: Transaction):
        """Open the RBF fee-bump dialog for *tx* and show the bumped
        replacement transaction if the user confirms."""
        txid = tx.txid()
        assert txid
        fee = self.wallet.get_tx_fee(txid)
        if fee is None:
            self.show_error(_("Can't bump fee: unknown fee for original transaction."))
            return
        tx_label = self.wallet.get_label_for_txid(txid)
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, _('Bump Fee'))
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
        grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
        grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
        grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
        def on_textedit_rate():
            # manual entry overrides the slider
            fee_slider.deactivate()
        feerate_e = FeerateEdit(lambda: 0)
        # suggest at least 1.5x (and at least +1 sat/vbyte) over the old rate
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        feerate_e.textEdited.connect(on_textedit_rate)
        grid.addWidget(feerate_e, 2, 1)
        def on_slider_rate(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.deactivate()
        grid.addWidget(fee_slider, 3, 1)
        grid.addWidget(fee_combo, 3, 2)
        vbox.addLayout(grid)
        # "Final" marks the replacement as non-RBF (no further bumps)
        cb = QCheckBox(_('Final'))
        vbox.addWidget(cb)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        is_final = cb.isChecked()
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, coins=self.get_coins())
        except CannotBumpFee as e:
            self.show_error(str(e))
            return
        if is_final:
            new_tx.set_rbf(False)
        self.show_transaction(new_tx, tx_desc=tx_label)
    def dscancel_dialog(self, tx: Transaction):
        """Open the cancel-transaction dialog for RBF *tx*: double-spend
        its inputs back to the wallet at a higher, user-chosen fee rate."""
        txid = tx.txid()
        assert txid
        fee = self.wallet.get_tx_fee(txid)
        if fee is None:
            self.show_error(_('Cannot cancel transaction') + ': ' + _('unknown fee for original transaction'))
            return
        tx_size = tx.estimated_size()
        old_fee_rate = fee / tx_size  # sat/vbyte
        d = WindowModalDialog(self, _('Cancel transaction'))
        vbox = QVBoxLayout(d)
        vbox.addWidget(WWLabel(_("Cancel an unconfirmed RBF transaction by double-spending "
                                 "its inputs back to your wallet with a higher fee.")))
        grid = QGridLayout()
        grid.addWidget(QLabel(_('Current Fee') + ':'), 0, 0)
        grid.addWidget(QLabel(self.format_amount(fee) + ' ' + self.base_unit()), 0, 1)
        grid.addWidget(QLabel(_('Current Fee rate') + ':'), 1, 0)
        grid.addWidget(QLabel(self.format_fee_rate(1000 * old_fee_rate)), 1, 1)
        grid.addWidget(QLabel(_('New Fee rate') + ':'), 2, 0)
        def on_textedit_rate():
            # manual entry overrides the slider
            fee_slider.deactivate()
        feerate_e = FeerateEdit(lambda: 0)
        # suggest at least 1.5x (and at least +1 sat/vbyte) over the old rate
        feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
        feerate_e.textEdited.connect(on_textedit_rate)
        grid.addWidget(feerate_e, 2, 1)
        def on_slider_rate(dyn, pos, fee_rate):
            fee_slider.activate()
            if fee_rate is not None:
                feerate_e.setAmount(fee_rate / 1000)
        fee_slider = FeeSlider(self, self.config, on_slider_rate)
        fee_combo = FeeComboBox(fee_slider)
        fee_slider.deactivate()
        grid.addWidget(fee_slider, 3, 1)
        grid.addWidget(fee_combo, 3, 2)
        vbox.addLayout(grid)
        vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
        if not d.exec_():
            return
        new_fee_rate = feerate_e.get_amount()
        try:
            new_tx = self.wallet.dscancel(tx=tx, new_fee_rate=new_fee_rate)
        except CannotDoubleSpendTx as e:
            self.show_error(str(e))
            return
        self.show_transaction(new_tx)
    def save_transaction_into_wallet(self, tx: Transaction):
        """Add an offline transaction to the wallet history and persist it.

        Returns True on success; shows an error dialog and returns False
        if the tx conflicts with existing history or cannot be added.
        """
        win = self.top_level_window()
        try:
            if not self.wallet.add_transaction(tx):
                win.show_error(_("Transaction could not be saved.") + "\n" +
                               _("It conflicts with current history."))
                return False
        except AddTransactionException as e:
            win.show_error(e)
            return False
        else:
            self.wallet.save_db()
            # need to update at least: history_list, utxo_list, address_list
            self.need_update.set()
            msg = (_("Transaction added to wallet history.") + '\n\n' +
                   _("Note: this is an offline transaction, if you want the network "
                     "to see it, you need to broadcast it."))
            win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
            return True
def show_cert_mismatch_error(self):
if self.showing_cert_mismatch_error:
return
self.showing_cert_mismatch_error = True
self.show_critical(title=_("Certificate mismatch"),
msg=_("The SSL certificate provided by the main server did not match the fingerprint passed in with the --serverfingerprint option.") + "\n\n" +
_("Electrum will now exit."))
self.showing_cert_mismatch_error = False
self.close()
def disable_opsender(self) -> bool:
return self.config.get("disable_opsender", True) or \
self.network.get_server_height() <= constants.net.QIP5_FORK_HEIGHT
    def set_token(self, token: 'Token'):
        """Add (or update) *token* in the wallet and refresh token views."""
        self.wallet.add_token(token)
        self.token_balance_list.update()
        self.token_hist_list.update()
        self.token_hist_model.refresh('set_token')
    def delete_token(self, key: str):
        """Remove the token identified by *key* after user confirmation,
        then refresh the token views."""
        token_name = self.wallet.db.get_token(key).name
        if not self.question(_("Remove {} from your token list ?")
                             .format(token_name)):
            return
        self.wallet.delete_token(key)
        # NOTE(review): unlike set_token(), token_hist_list.update() is not
        # called here — confirm whether the refresh below covers it.
        self.token_balance_list.update()
        self.token_hist_model.refresh('delete_token')
def create_tokens_tab(self):
    """Build the Tokens tab: balance list on top, token history below."""
    from .token_list import TokenBalanceList, TokenHistoryModel, TokenHistoryList
    self.token_balance_list = tbl = TokenBalanceList(self)
    self.token_hist_model = TokenHistoryModel(self)
    self.token_hist_list = thl = TokenHistoryList(self, self.token_hist_model)
    self.token_hist_model.set_view(self.token_hist_list)
    splitter = QSplitter(self)
    splitter.addWidget(tbl)
    splitter.addWidget(thl)
    splitter.setOrientation(Qt.Vertical)
    return splitter
def token_add_dialog(self):
    """Open the dialog for adding a new QRC20 token (unsupported on Trezor)."""
    if isinstance(self.wallet.keystore, TrezorKeyStore):
        self.show_message('Trezor does not support QRC20 Token for now')
        return
    d = TokenAddDialog(self)
    d.show()
def token_info_dialog(self, token: 'Token'):
    """Show the read-only info dialog for *token*."""
    d = TokenInfoDialog(self, token)
    d.show()
def token_send_dialog(self, token: 'Token'):
    """Open the send dialog for *token*."""
    d = TokenSendDialog(self, token)
    d.show()
def do_token_pay(self, token: 'Token', pay_to: str, amount: int, gas_limit: int, gas_price: int, dialog, preview=False):
    """Build and broadcast a QRC20 ``transfer()`` contract call.

    :param pay_to: recipient hash160 hex string; left-padded to 32 bytes for
        the ABI encoding
    :param amount: token amount in base units (before dividing by
        10 ** token.decimals)
    """
    try:
        # 'a9059cbb' is the ABI selector for transfer(address,uint256).
        datahex = 'a9059cbb{}{:064x}'.format(pay_to.zfill(64), amount)
        op_sender = None if self.disable_opsender() else token.bind_addr
        script = contract_script(gas_limit, gas_price, datahex, token.contract_addr, opcodes.OP_CALL, op_sender)
        outputs = [PartialTxOutput(scriptpubkey=script, value=0)]
        tx_desc = _('Pay out {} {}').format(amount / (10 ** token.decimals), token.symbol)
        self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price,
                                       token.bind_addr, dialog, None, preview)
    except (BaseException,) as e:
        # Broad on purpose: any failure is reported in the calling dialog.
        traceback.print_exc(file=sys.stderr)
        dialog.show_message(str(e))
def set_delegation(self, dele: 'Delegation'):
    """Store a staking delegation in the wallet and refresh the list view."""
    self.wallet.add_delegation(dele)
    self.delegation_list.update()
def delete_delegation(self, addr: str):
    """Remove the delegation stored for *addr* and refresh the list view."""
    self.wallet.delete_delegation(addr)
    self.delegation_list.update()
def call_add_delegation(self, addr: str, staker: str, fee: int, gas_limit: int, gas_price: int, dialog, pod: Optional[bytes]):
    """
    Create or update an offline-staking delegation via the delegation contract.

    :param staker: hash160 str
    :param pod: proof-of-delegation; when not supplied it is produced by
        signing *staker* with the key of *addr* (must be 65 bytes)
    """
    password = None
    if self.wallet.has_keystore_encryption():
        password = self.password_dialog(_("Enter your password to proceed"))
        if not password: return
    if not pod:
        pod = self.wallet.sign_message(addr, staker, password)
    if len(pod) != 65:
        raise Exception("incorrect POD length")
    args = [staker.lower(), fee, pod]
    # DELEGATE_ABI[1] -- presumably the addDelegation entry; confirm in constants.
    self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[1], args,
                               gas_limit, gas_price, 0, addr, dialog, False, tx_desc="update delegation")
def call_remove_delegation(self, addr: str, gas_limit: int, gas_price: int, dialog):
    """Remove the delegation for *addr* by calling the delegation contract."""
    self.sendto_smart_contract(DELEGATION_CONTRACT, DELEGATE_ABI[0], [],
                               gas_limit, gas_price, 0, addr, dialog, False, tx_desc="remove delegation")
def create_delegations_tab(self):
    """Build the Delegations tab around a DelegationList widget."""
    from .delegation_list import DelegationList
    self.delegation_list = l = DelegationList(self)
    return self.create_list_tab(l)
def delegation_dialog(self, dele: 'Delegation' = None, mode='add'):
    """Open the add/edit delegation dialog, guarding unsupported setups.

    Refuses on Trezor keystores and while the server is below the
    offline-staking activation height.
    """
    if isinstance(self.wallet.keystore, TrezorKeyStore):
        self.show_message('Trezor does not support staking delegation for now')
        return
    if self.network.get_server_height() < constants.net.OFFLINE_STAKE_HEIGHT:
        self.show_message('Offline staking not activated')
        return
    d = DelegationDialog(self, dele, mode)
    d.show()
def _smart_contract_broadcast(self, outputs: list, desc: str, gas_fee: int, sender: str, dialog,
                              broadcast_done=None, preview=False):
    """Shared confirm/sign/broadcast pipeline for contract transactions.

    :param outputs: contract-call PartialTxOutput list
    :param desc: label stored for the tx after successful signing
    :param gas_fee: gas_limit * gas_price, accounted separately from the miner fee
    :param sender: P2PKH address that funds and signs the call
    :param broadcast_done: optional callback run after broadcast
    :param preview: when True, open the advanced preview instead of sending
    """
    addr_type, __ = b58_address_to_hash160(sender)
    if not addr_type == constants.net.ADDRTYPE_P2PKH:
        dialog.show_message(_('only P2PKH address can call contract'))
        return
    coins = self.get_coins()
    make_tx = lambda fee_est: self.wallet.make_unsigned_transaction(coins=coins,
                                                                    outputs=outputs,
                                                                    fee=fee_est,
                                                                    change_addr=sender,
                                                                    gas_fee=gas_fee,
                                                                    sender=sender,
                                                                    is_sweep=False)
    output_values = [x.value for x in outputs]
    # '!' marks a spend-max output; at most one is allowed per transaction.
    if output_values.count('!') > 1:
        self.show_error(_("More than one output set to spend max"))
        return
    output_value = '!' if '!' in output_values else sum(output_values)
    try:
        d = ConfirmTxDialog(window=self, make_tx=make_tx, output_value=output_value, is_sweep=False, gas_fee=gas_fee)
    except SenderNoUTXOException as e:
        self.show_error(str(e))
        return
    if d.not_enough_funds:
        # Check if we had enough funds excluding fees,
        # if so, still provide opportunity to set lower fees.
        if not d.have_enough_funds_assuming_zero_fees():
            self.show_message(_('Not Enough Funds'))
            return
    # shortcut to advanced preview (after "enough funds" check!)
    if preview or self.config.get('advanced_preview'):
        self.preview_tx_dialog(make_tx=make_tx)
        return
    cancelled, is_send, password, tx = d.run()
    if cancelled:
        return
    if tx is None:
        self.show_message(_('transaction is None'))
        return
    if is_send:
        def sign_done(success):
            if success:
                self.broadcast_or_show(tx, broadcast_done=broadcast_done)
                if desc is not None:
                    self.wallet.set_label(tx.txid(), desc)
        self.sign_tx_with_password(tx, callback=sign_done, password=password)
    else:
        self.preview_tx_dialog(make_tx=make_tx)
def create_smart_contract_tab(self):
    """Build the Smart Contracts tab around a SmartContractList widget."""
    from .smart_contract_list import SmartContractList
    self.smart_contract_list = l = SmartContractList(self)
    return self.create_list_tab(l)
def set_smart_contract(self, name: str, address: str, interface: list) -> bool:
    """Store (name, interface) for contract *address* in the wallet db.

    Returns False (after refreshing the list view) when *address* is not a
    valid hash160 hex string, True otherwise.
    """
    if not is_hash160(address):
        self.show_error(_('Invalid Address'))
        self.smart_contract_list.update()
        return False
    self.wallet.db.smart_contracts[address] = (name, interface)
    self.smart_contract_list.update()
    return True
def delete_samart_contact(self, address: str) -> bool:
    """Ask for confirmation, then remove *address* from the stored contracts.

    Returns True when the contract was removed, False when the user declined.
    (The method name keeps its historical typo because callers reference it.)
    """
    # Fix: translate the template first, THEN format it. The old code called
    # .format() inside _(), so the gettext lookup used the already-formatted
    # string as msgid and could never find a translation (compare
    # delete_token, which does it correctly).
    name = self.wallet.db.smart_contracts[address][0]
    if not self.question(_("Remove {} from your list of smart contracts?").format(name)):
        return False
    self.wallet.db.smart_contracts.pop(address)
    self.smart_contract_list.update()
    return True
def call_smart_contract(self, address: str, func: dict, args: list, sender: str, dialog):
    """Perform a read-only contract call and show the decoded result.

    :param func: ABI entry dict of the function being called
    """
    data = eth_abi_encode(func, args)
    try:
        result = self.network.run_from_another_thread(self.network.call_contract(address, data, sender))
    except BaseException as e:
        self.logger.exception('')
        dialog.show_message(str(e))
        return
    # ABI output types declared by the function, used for decoding.
    types = list([x['type'] for x in func.get('outputs', [])])
    try:
        if isinstance(result, dict):
            # Node returned a structured result; surface contract exceptions.
            except_msg = result.get('executionResult', {}).get('exceptedMessage', '')
            if len(except_msg) > 1:
                dialog.show_message(f"exceptedMessage: {except_msg}")
                return
            output = eth_abi.decode_abi(types, binascii.a2b_hex(result['executionResult']['output']))
        else:
            output = eth_abi.decode_abi(types, binascii.a2b_hex(result))

        def decode_x(x):
            # Render bytes as UTF-8 where possible, falling back to repr.
            if isinstance(x, bytes):
                try:
                    return x.decode()
                except UnicodeDecodeError:
                    return str(x)
            return str(x)

        output = ','.join([decode_x(x) for x in output])
        dialog.show_message(output)
    except (BaseException,) as e:
        self.logger.exception('')
        dialog.show_message(f'{e} {result}')
def sendto_smart_contract(self, address: str, func: dict, args: list,
                          gas_limit: int, gas_price: int, amount: int, sender: str,
                          dialog, preview, tx_desc=None):
    """Build and broadcast a state-changing (OP_CALL) contract transaction."""
    try:
        abi_encoded = eth_abi_encode(func, args)
        op_sender = None if self.disable_opsender() else sender
        script = contract_script(gas_limit, gas_price, abi_encoded, address, opcodes.OP_CALL, op_sender)
        outputs = [PartialTxOutput(scriptpubkey=script, value=amount)]
        if tx_desc is None:
            # Default label: the stored contract name, or the raw address.
            tx_desc = 'contract sendto {}'.format(self.wallet.db.smart_contracts.get(address, [address, ])[0])
        self._smart_contract_broadcast(outputs, tx_desc, gas_limit * gas_price, sender, dialog, None, preview)
    except (BaseException,) as e:
        self.logger.exception('')
        dialog.show_message(str(e))
def create_smart_contract(self, name: str, bytecode: str, abi: list, constructor: dict,
                          args: list, gas_limit: int, gas_price: int, sender: str, dialog, preview):
    """Deploy a contract (OP_CREATE) and register its address after broadcast."""
    def broadcast_done(tx):
        # Contract address = HASH160(reversed txid || 4-byte LE output index 0).
        s = tx.outputs()[0].scriptpubkey
        if decode_opcreate_script(s) or decode_opsender_script(s):
            reversed_txid = binascii.a2b_hex(tx.txid())[::-1]
            output_index = b'\x00\x00\x00\x00'
            contract_addr = hash_160(reversed_txid + output_index).hex()
            self.set_smart_contract(name, contract_addr, abi)
        else:
            self.logger.debug("the smart contract created seems to be invalid")
    try:
        abi_encoded = ''
        if constructor:
            abi_encoded = eth_abi_encode(constructor, args)
        op_sender = None if self.disable_opsender() else sender
        script = contract_script(gas_limit, gas_price, bytecode + abi_encoded, None, opcodes.OP_CREATE, op_sender)
        outputs = [PartialTxOutput(scriptpubkey=script, value=0)]
        self._smart_contract_broadcast(outputs, 'create contract {}'.format(name), gas_limit * gas_price,
                                       sender, dialog, broadcast_done, preview)
    except (BaseException,) as e:
        self.logger.exception('')
        dialog.show_message(str(e))
def contract_create_dialog(self):
    """Open the dialog for deploying a new contract."""
    d = ContractCreateDialog(self)
    d.show()
def contract_add_dialog(self):
    """Open the dialog for adding an existing contract by address/ABI."""
    d = ContractEditDialog(self)
    d.show()
def contract_edit_dialog(self, address: str):
    """Open the edit dialog pre-filled with the stored contract data."""
    name, interface = self.wallet.db.smart_contracts[address]
    contract = {
        'name': name,
        'interface': interface,
        'address': address
    }
    d = ContractEditDialog(self, contract)
    d.show()
def contract_func_dialog(self, address: str):
    """Open the function-invocation dialog for the stored contract."""
    name, interface = self.wallet.db.smart_contracts[address]
    contract = {
        'name': name,
        'interface': interface,
        'address': address
    }
    d = ContractFuncDialog(self, contract)
    d.show()
|
multiprocessing_t.py | #How can we share data between processes?
# Processes don't live in the same memory, so they don't have access to same public data and need special access
# to shared memory data. There are two shared memory objects that we can use.
#1] Value
#2] Array
from multiprocessing import Process, Value, Array, Lock
import numbers
import os
import time
def add100(num, lock):
    """Increment the shared Value *num* one hundred times, guarded by *lock*."""
    # Without the lock, a process switch right after this sleep exposes the race.
    time.sleep(0.01)
    for _ in range(100):
        with lock:
            num.value += 1
def sum_array(arr, lock):
    """Add 1 to every element of the shared Array *arr*, repeated 100 times.

    With two worker processes each element therefore gains 200 in total.
    The lock protects each individual increment from lost updates.
    """
    time.sleep(0.01)
    # Fix: the original nested loops both used the name ``i``, with the inner
    # loop shadowing the outer counter. Distinct names keep the intent clear.
    for _round in range(100):
        for idx in range(len(arr)):
            with lock:
                arr[idx] += 1
def square(numbers, q):
    """Put the square of every item of *numbers* onto the queue *q*."""
    for value in numbers:
        q.put(value * value)
def make_negative(numbers, q):
    """Put the negation of every item of *numbers* onto the queue *q*."""
    for value in numbers:
        q.put(-value)
def cube(number):
    """Return *number* raised to the third power."""
    return pow(number, 3)
if __name__ == '__main__':
    lock = Lock()
    # A shared Value can be read and written by every process.
    shared_number = Value('i', 0)  # typecode 'i' = C int, initial value 0
    print(f'Number at beginning is {shared_number.value}')
    p1 = Process(target=add100, args=(shared_number, lock))
    p2 = Process(target=add100, args=(shared_number, lock))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(f'Number at end is {shared_number.value}')

    # A shared Array works the same way for a fixed-length sequence.
    array = Array('d', [1.4, 2.2, 3.4, 4.6, 5.8])
    print(f'Array at beginning is {array[:]}')
    p1 = Process(target=sum_array, args=(array, lock))
    p2 = Process(target=sum_array, args=(array, lock))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    print(f'Array at end is {array[:]}')
    # ------------------------------------------------------------------------
    # A Queue can also be shared between processes.
    from multiprocessing import Queue
    q = Queue()
    # Fix: ``numbers`` was used here but only defined further down (just
    # before the Pool example), which raised a NameError at runtime.
    numbers = range(10)
    p1 = Process(target=square, args=(numbers, q))
    p2 = Process(target=make_negative, args=(numbers, q))
    p1.start()
    p2.start()
    p1.join()
    p2.join()
    while not q.empty():
        print(q.get())
    # A process pool is used to control multiple processes.
    # A process pool object controls a pool of worker processes to which jobs
    # can be submitted; it can split data into smaller chunks which are then
    # processed in parallel by different processors.
    from multiprocessing import Pool
    pool = Pool()
    # 4 important methods (there are more but these are mostly used):
    # map, apply, close, join.
    result = pool.map(cube, numbers)  # splits the iterable into chunks and distributes them
    # Fix: Pool.apply expects an argument *tuple*; passing the bare int
    # numbers[0] raised a TypeError.
    pool.apply(cube, (numbers[0],))
    pool.close()  # no more tasks may be submitted after this
    pool.join()   # wait for the pool to finish all outstanding work
    print(result)
|
writer.py | import os
import time
from threading import Thread
from queue import Queue
import cv2
import numpy as np
import torch
import torch.multiprocessing as mp
from alphapose.utils.transforms import get_func_heatmap_to_coord
from alphapose.utils.pPose_nms import pose_nms, write_json
# Fallback options for cv2.VideoWriter when the caller supplies none.
DEFAULT_VIDEO_SAVE_OPT = {
    'savepath': 'examples/res/1.mp4',
    'fourcc': cv2.VideoWriter_fourcc(*'mp4v'),
    'fps': 25,
    'frameSize': (640, 480)
}
# Default joint indices used for evaluation (17 keypoints).
EVAL_JOINTS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
class DataWriter():
    """Consumes pose-estimation results from a queue on a background worker
    (a Thread in single-process mode, otherwise a torch.multiprocessing
    Process), renders/saves visualisations and finally writes JSON results.
    """
    def __init__(self, cfg, opt, save_video=False,
                 video_save_opt=DEFAULT_VIDEO_SAVE_OPT,
                 queueSize=1024):
        self.cfg = cfg
        self.opt = opt
        self.video_save_opt = video_save_opt
        self.eval_joints = EVAL_JOINTS
        self.save_video = save_video
        self.heatmap_to_coord = get_func_heatmap_to_coord(cfg)
        # initialize the queue used to store frames read from
        # the video file
        if opt.sp:
            self.result_queue = Queue(maxsize=queueSize)
        else:
            self.result_queue = mp.Queue(maxsize=queueSize)
        if opt.save_img:
            if not os.path.exists(opt.outputpath + '/vis'):
                os.mkdir(opt.outputpath + '/vis')
        if opt.pose_flow:
            from trackers.PoseFlow.poseflow_infer import PoseFlowWrapper
            self.pose_flow_wrapper = PoseFlowWrapper(save_path=os.path.join(opt.outputpath, 'poseflow'))

    def start_worker(self, target):
        # Thread in single-process mode, separate process otherwise.
        if self.opt.sp:
            p = Thread(target=target, args=())
        else:
            p = mp.Process(target=target, args=())
        # p.daemon = True
        p.start()
        return p

    def start(self):
        # start a thread to read pose estimation results per frame
        self.result_worker = self.start_worker(self.update)
        return self

    def update(self):
        """Worker loop: drain the result queue until the poison pill
        (orig_img is None) arrives, writing images/video frames and
        collecting per-frame JSON results along the way."""
        final_result = []
        norm_type = self.cfg.LOSS.get('NORM_TYPE', None)
        hm_size = self.cfg.DATA_PRESET.HEATMAP_SIZE
        if self.save_video:
            # initialize the file video stream, adapt output video resolution to original video
            stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
            if not stream.isOpened():
                print("Try to use other video encoders...")
                ext = self.video_save_opt['savepath'].split('.')[-1]
                fourcc, _ext = self.recognize_video_ext(ext)
                self.video_save_opt['fourcc'] = fourcc
                self.video_save_opt['savepath'] = self.video_save_opt['savepath'][:-4] + _ext
                stream = cv2.VideoWriter(*[self.video_save_opt[k] for k in ['savepath', 'fourcc', 'fps', 'frameSize']])
            assert stream.isOpened(), 'Cannot open video for writing'
        # keep looping infinitely
        while True:
            # ensure the queue is not empty and get item
            (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name) = self.wait_and_get(self.result_queue)
            if orig_img is None:
                # if the thread indicator variable is set (img is None), stop the thread
                if self.save_video:
                    stream.release()
                write_json(final_result, self.opt.outputpath, form=self.opt.format, for_eval=self.opt.eval)
                print("Results have been written to json.")
                return
            # image channel RGB->BGR
            orig_img = np.array(orig_img, dtype=np.uint8)[:, :, ::-1]
            if boxes is None or len(boxes) == 0:
                if self.opt.save_img or self.save_video or self.opt.vis:
                    self.write_image(orig_img, im_name, stream=stream if self.save_video else None)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                assert hm_data.dim() == 4
                #pred = hm_data.cpu().data.numpy()
                # Select the joint set matching the heatmap channel count;
                # the default stays at the 17 COCO joints.
                if hm_data.size()[1] == 136:
                    self.eval_joints = [*range(0,136)]
                elif hm_data.size()[1] == 26:
                    self.eval_joints = [*range(0,26)]
                pose_coords = []
                pose_scores = []
                for i in range(hm_data.shape[0]):
                    bbox = cropped_boxes[i].tolist()
                    pose_coord, pose_score = self.heatmap_to_coord(hm_data[i][self.eval_joints], bbox, hm_shape=hm_size, norm_type=norm_type)
                    pose_coords.append(torch.from_numpy(pose_coord).unsqueeze(0))
                    pose_scores.append(torch.from_numpy(pose_score).unsqueeze(0))
                preds_img = torch.cat(pose_coords)
                preds_scores = torch.cat(pose_scores)
                if not self.opt.pose_track:
                    # NMS across detections when no tracker assigns identities.
                    boxes, scores, ids, preds_img, preds_scores, pick_ids = \
                        pose_nms(boxes, scores, ids, preds_img, preds_scores, self.opt.min_box_area)
                _result = []
                for k in range(len(scores)):
                    _result.append(
                        {
                            'keypoints':preds_img[k],
                            'kp_score':preds_scores[k],
                            'proposal_score': torch.mean(preds_scores[k]) + scores[k] + 1.25 * max(preds_scores[k]),
                            'idx':ids[k],
                            'box':[boxes[k][0], boxes[k][1], boxes[k][2]-boxes[k][0],boxes[k][3]-boxes[k][1]]
                        }
                    )
                result = {
                    'imgname': im_name,
                    'result': _result
                }
                if self.opt.pose_flow:
                    # Re-assign person identities using PoseFlow tracking.
                    poseflow_result = self.pose_flow_wrapper.step(orig_img, result)
                    for i in range(len(poseflow_result)):
                        result['result'][i]['idx'] = poseflow_result[i]['idx']
                final_result.append(result)
                if self.opt.save_img or self.save_video or self.opt.vis:
                    if hm_data.size()[1] == 49:
                        from alphapose.utils.vis import vis_frame_dense as vis_frame
                    elif self.opt.vis_fast:
                        from alphapose.utils.vis import vis_frame_fast as vis_frame
                    else:
                        from alphapose.utils.vis import vis_frame
                    img = vis_frame(orig_img, result, self.opt)
                    self.write_image(img, im_name, stream=stream if self.save_video else None)

    def write_image(self, img, im_name, stream=None):
        # Show, save to disk, and/or append to the video stream as configured.
        if self.opt.vis:
            cv2.imshow("AlphaPose Demo", img)
            cv2.waitKey(30)
        if self.opt.save_img:
            cv2.imwrite(os.path.join(self.opt.outputpath, 'vis', im_name), img)
        if self.save_video:
            stream.write(img)

    def wait_and_put(self, queue, item):
        queue.put(item)

    def wait_and_get(self, queue):
        return queue.get()

    def save(self, boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name):
        # save next frame in the queue
        self.wait_and_put(self.result_queue, (boxes, scores, ids, hm_data, cropped_boxes, orig_img, im_name))

    def running(self):
        # indicate that the thread is still running
        return not self.result_queue.empty()

    def count(self):
        # indicate the remaining images
        return self.result_queue.qsize()

    def stop(self):
        # indicate that the thread should be stopped
        self.save(None, None, None, None, None, None, None)
        self.result_worker.join()

    def terminate(self):
        # directly terminate
        self.result_worker.terminate()

    def clear_queues(self):
        self.clear(self.result_queue)

    def clear(self, queue):
        while not queue.empty():
            queue.get()

    def results(self):
        # return final result
        # NOTE(review): ``self.final_result`` is never assigned anywhere --
        # update() keeps ``final_result`` as a local. Calling this raises
        # AttributeError; results are only available via the written JSON.
        print(self.final_result)
        return self.final_result

    def recognize_video_ext(self, ext=''):
        """Map a file extension to a (fourcc, extension) pair, defaulting to mp4."""
        if ext == 'mp4':
            return cv2.VideoWriter_fourcc(*'mp4v'), '.' + ext
        elif ext == 'avi':
            return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
        elif ext == 'mov':
            return cv2.VideoWriter_fourcc(*'XVID'), '.' + ext
        else:
            print("Unknow video format {}, will use .mp4 instead of it".format(ext))
            return cv2.VideoWriter_fourcc(*'mp4v'), '.mp4'
|
file_stream.py | import base64
import binascii
import collections
import itertools
import logging
import os
import sys
import random
import requests
import threading
import time
import wandb
from wandb import util
from wandb import env
import six
from six.moves import queue
from ..lib import file_stream_utils
logger = logging.getLogger(__name__)

# A single pending piece of file data: (target filename, raw chunk payload).
Chunk = collections.namedtuple("Chunk", ("filename", "data"))
class DefaultFilePolicy(object):
    """Baseline chunk-batching policy: emit chunk payloads verbatim together
    with the running offset of the first chunk in the batch."""

    def __init__(self, start_chunk_id=0):
        self._chunk_id = start_chunk_id

    def process_chunks(self, chunks):
        first_id = self._chunk_id
        self._chunk_id = first_id + len(chunks)
        payload = [chunk.data for chunk in chunks]
        return {"offset": first_id, "content": payload}
class JsonlFilePolicy(DefaultFilePolicy):
    """Chunk policy for JSON-lines files; oversized lines are dropped with a
    user-facing warning instead of being sent."""

    def process_chunks(self, chunks):
        start_offset = self._chunk_id
        # TODO: chunk_id is getting reset on each request...
        self._chunk_id += len(chunks)
        kept = []
        for chunk in chunks:
            if len(chunk.data) <= util.MAX_LINE_SIZE:
                kept.append(chunk.data)
                continue
            msg = "Metric data exceeds maximum size of {} ({})".format(
                util.to_human_size(util.MAX_LINE_SIZE),
                util.to_human_size(len(chunk.data)),
            )
            wandb.termerror(msg, repeat=False)
            util.sentry_message(msg)
        return {
            "offset": start_offset,
            "content": kept,
        }
class SummaryFilePolicy(DefaultFilePolicy):
    """Chunk policy for the summary file: only the latest summary matters, so
    always send just the last chunk, at offset 0."""
    def process_chunks(self, chunks):
        data = chunks[-1].data
        if len(data) > util.MAX_LINE_SIZE:
            msg = "Summary data exceeds maximum size of {}. Dropping it.".format(
                util.to_human_size(util.MAX_LINE_SIZE)
            )
            wandb.termerror(msg, repeat=False)
            util.sentry_message(msg)
            # Returning False tells the sender to skip this file entirely.
            return False
        return {"offset": 0, "content": [data]}
class CRDedupeFilePolicy(DefaultFilePolicy):
    """File stream policy that removes characters that would be erased by
    carriage returns.

    This is what a terminal does. We use it for console output to reduce the
    amount of data we need to send over the network (eg. for progress bars),
    while preserving the output's appearance in the web app.
    """
    def __init__(self, start_chunk_id=0):
        super(CRDedupeFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
        # Last request payload, kept so a "\r" at the start of a new batch can
        # overwrite a line that was already sent.
        self._prev_chunk = None

    def process_chunks(self, chunks):
        ret = []
        # True while we may still patch lines of the previously sent chunk.
        flag = bool(self._prev_chunk)
        chunk_id = self._chunk_id
        for c in chunks:
            # Line has two possible formats:
            # 1) "2020-08-25T20:38:36.895321 this is my line of text"
            # 2) "ERROR 2020-08-25T20:38:36.895321 this is my line of text"
            prefix = ""
            token, rest = c.data.split(" ", 1)
            is_err = False
            if token == "ERROR":
                is_err = True
                prefix += token + " "
                token, rest = rest.split(" ", 1)
            prefix += token + " "
            lines = rest.split(os.linesep)
            for line in lines:
                if line.startswith("\r"):
                    # Carriage return: overwrite the most recent line of the
                    # same stream (stdout vs stderr) instead of appending.
                    found = False
                    for i in range(len(ret) - 1, -1, -1):
                        if ret[i].startswith("ERROR ") == is_err:
                            ret[i] = prefix + line[1:] + "\n"
                            found = True
                            break
                    if not found:
                        if flag:
                            # Nothing overwritable in this batch: try the
                            # previously sent chunk (at most once per call).
                            flag = False
                            prev_ret = self._prev_chunk["content"]
                            for i in range(len(prev_ret) - 1, -1, -1):
                                if prev_ret[i].startswith("ERROR ") == is_err:
                                    prev_ret[i] = prefix + line[1:] + "\n"
                                    found = True
                                    break
                            if found:
                                # Resend the previous chunk with the patched line.
                                chunk_id = self._prev_chunk["offset"]
                                ret = prev_ret + ret
                            else:
                                ret.append(prefix + line[1:] + "\n")
                        else:
                            ret.append(prefix + line[1:] + "\n")
                elif line:
                    ret.append(prefix + line + "\n")
        self._chunk_id = chunk_id + len(ret)
        ret = {"offset": chunk_id, "content": ret}
        self._prev_chunk = ret
        return ret
class BinaryFilePolicy(DefaultFilePolicy):
    """Chunk policy for binary files: concatenate the raw bytes and send them
    base64-encoded, tracking a running byte offset.

    Fixes two defects in the previous version:
    * ``self._offset`` was never initialised, so the first call raised
      AttributeError;
    * the returned "offset" was the *post*-update value, while every other
      policy reports the offset at which the new content starts (the local
      ``offset`` was computed for exactly that purpose but never used).
    """

    def __init__(self, start_chunk_id=0):
        super(BinaryFilePolicy, self).__init__(start_chunk_id=start_chunk_id)
        self._offset = 0  # number of bytes already emitted

    def process_chunks(self, chunks):
        data = b"".join([c.data for c in chunks])
        enc = base64.b64encode(data).decode("ascii")
        offset = self._offset
        self._offset += len(data)
        return {"offset": offset, "content": enc, "encoding": "base64"}
class FileStreamApi(object):
    """Pushes chunks of files to our streaming endpoint.

    This class is used as a singleton. It has a thread that serializes access to
    the streaming endpoint and performs rate-limiting and batching.

    TODO: Differentiate between binary/text encoding.
    """
    # Queue sentinels understood by the push thread.
    Finish = collections.namedtuple("Finish", ("exitcode"))
    Preempting = collections.namedtuple("Preempting", ())
    HTTP_TIMEOUT = env.get_http_timeout(10)
    MAX_ITEMS_PER_PUSH = 10000

    def __init__(self, api, run_id, start_time, settings=None):
        if settings is None:
            settings = dict()
        # NOTE: exc_info is set in thread_except_body context and readable by calling threads
        self._exc_info = None
        self._settings = settings
        self._api = api
        self._run_id = run_id
        self._start_time = start_time
        self._client = requests.Session()
        self._client.auth = ("api", api.api_key)
        self._client.timeout = self.HTTP_TIMEOUT
        self._client.headers.update(
            {
                "User-Agent": api.user_agent,
                "X-WANDB-USERNAME": env.get_username(),
                "X-WANDB-USER-EMAIL": env.get_user_email(),
            }
        )
        self._file_policies = {}
        self._queue = queue.Queue()
        self._thread = threading.Thread(target=self._thread_except_body)
        # It seems we need to make this a daemon thread to get sync.py's atexit handler to run, which
        # cleans this thread up.
        self._thread.name = "FileStreamThread"
        self._thread.daemon = True
        self._init_endpoint()

    def _init_endpoint(self):
        # Build the file_stream URL from (possibly overridden) api settings.
        settings = self._api.settings()
        settings.update(self._settings)
        self._endpoint = "{base}/files/{entity}/{project}/{run}/file_stream".format(
            base=settings["base_url"],
            entity=settings["entity"],
            project=settings["project"],
            run=self._run_id,
        )

    def start(self):
        # Endpoint is rebuilt here in case settings changed since __init__.
        self._init_endpoint()
        self._thread.start()

    def set_default_file_policy(self, filename, file_policy):
        """Set an upload policy for a file unless one has already been set.
        """
        if filename not in self._file_policies:
            self._file_policies[filename] = file_policy

    def set_file_policy(self, filename, file_policy):
        self._file_policies[filename] = file_policy

    @property
    def heartbeat_seconds(self):
        # Defaults to 30
        return self._api.dynamic_settings["heartbeat_seconds"]

    def rate_limit_seconds(self):
        # Post more frequently early in the run, back off as it ages.
        run_time = time.time() - self._start_time
        if run_time < 60:
            return max(1, self.heartbeat_seconds / 15)
        elif run_time < 300:
            return max(2.5, self.heartbeat_seconds / 3)
        else:
            return max(5, self.heartbeat_seconds)

    def _read_queue(self):
        # called from the push thread (_thread_body), this does an initial read
        # that'll block for up to rate_limit_seconds. Then it tries to read
        # as much out of the queue as it can. We do this because the http post
        # to the server happens within _thread_body, and can take longer than
        # our rate limit. So next time we get a chance to read the queue we want
        # read all the stuff that queue'd up since last time.
        #
        # If we have more than MAX_ITEMS_PER_PUSH in the queue then the push thread
        # will get behind and data will buffer up in the queue.
        return util.read_many_from_queue(
            self._queue, self.MAX_ITEMS_PER_PUSH, self.rate_limit_seconds()
        )

    def _thread_body(self):
        """Push-thread main loop: batch chunks, rate-limit posts, send
        heartbeats, and post the final "complete" message on Finish."""
        posted_data_time = time.time()
        posted_anything_time = time.time()
        ready_chunks = []
        finished = None
        while finished is None:
            items = self._read_queue()
            for item in items:
                if isinstance(item, self.Finish):
                    finished = item
                elif isinstance(item, self.Preempting):
                    request_with_retry(
                        self._client.post,
                        self._endpoint,
                        json={"complete": False, "preempting": True},
                    )
                else:
                    # item is Chunk
                    ready_chunks.append(item)
            cur_time = time.time()
            if ready_chunks and (
                finished or cur_time - posted_data_time > self.rate_limit_seconds()
            ):
                posted_data_time = cur_time
                posted_anything_time = cur_time
                self._send(ready_chunks)
                ready_chunks = []
            if cur_time - posted_anything_time > self.heartbeat_seconds:
                # Keep the run marked alive even when there is no data.
                posted_anything_time = cur_time
                self._handle_response(
                    request_with_retry(
                        self._client.post,
                        self._endpoint,
                        json={"complete": False, "failed": False},
                    )
                )
        # post the final close message. (item is self.Finish instance now)
        request_with_retry(
            self._client.post,
            self._endpoint,
            json={"complete": True, "exitcode": int(finished.exitcode)},
        )

    def _thread_except_body(self):
        # TODO: Consolidate with internal_util.ExceptionThread
        try:
            self._thread_body()
        except Exception as e:
            exc_info = sys.exc_info()
            self._exc_info = exc_info
            logger.exception("generic exception in filestream thread")
            util.sentry_exc(exc_info, delay=True)
            raise e

    def _handle_response(self, response):
        """Logs dropped chunks and updates dynamic settings"""
        if isinstance(response, Exception):
            wandb.termerror("Droppped streaming file chunk (see wandb/debug.log)")
            logging.error("dropped chunk %s" % response)
            raise response
        else:
            parsed = None
            try:
                parsed = response.json()
            except Exception:
                pass
            if isinstance(parsed, dict):
                limits = parsed.get("limits")
                if isinstance(limits, dict):
                    self._api.dynamic_settings.update(limits)

    def _send(self, chunks):
        # create files dict. dict of <filename: chunks> pairs where chunks is a list of
        # [chunk_id, chunk_data] tuples (as lists since this will be json).
        files = {}
        # Groupby needs group keys to be consecutive, so sort first.
        chunks.sort(key=lambda c: c.filename)
        for filename, file_chunks in itertools.groupby(chunks, lambda c: c.filename):
            file_chunks = list(file_chunks)  # groupby returns iterator
            # Specific file policies are set by internal/sender.py
            self.set_default_file_policy(filename, DefaultFilePolicy())
            files[filename] = self._file_policies[filename].process_chunks(file_chunks)
            if not files[filename]:
                # A falsy policy result (e.g. SummaryFilePolicy) means "skip".
                del files[filename]
        for fs in file_stream_utils.split_files(files, max_mb=10):
            self._handle_response(
                request_with_retry(
                    self._client.post,
                    self._endpoint,
                    json={"files": fs},
                    retry_callback=self._api.retry_callback,
                )
            )

    def stream_file(self, path):
        # Send an entire file, one chunk per line.
        name = path.split("/")[-1]
        with open(path) as f:
            self._send([Chunk(name, line) for line in f])

    def enqueue_preempting(self):
        self._queue.put(self.Preempting())

    def push(self, filename, data):
        """Push a chunk of a file to the streaming endpoint.

        Arguments:
            filename: Name of file that this is a chunk of.
            chunk_id: TODO: change to 'offset'
            chunk: File data.
        """
        self._queue.put(Chunk(filename, data))

    def finish(self, exitcode):
        """Cleans up.

        Anything pushed after finish will be dropped.

        Arguments:
            exitcode: The exitcode of the watched process.
        """
        self._queue.put(self.Finish(exitcode))
        # TODO(jhr): join on a thread which exited with an exception is a noop, clean up this path
        self._thread.join()
        if self._exc_info:
            logger.error("FileStream exception", exc_info=self._exc_info)
            # reraising the original exception, will get recaught in internal.py for the sender thread
            six.reraise(*self._exc_info)
MAX_SLEEP_SECONDS = 60 * 5
def request_with_retry(func, *args, **kwargs):
    """Perform a requests http call, retrying with exponential backoff.

    Arguments:
        func: An http-requesting function to call, like requests.post
        max_retries: Maximum retries before giving up. By default we retry 30 times in ~2 hours before dropping the chunk
        retry_callback: optional callable(status_code, message) invoked on 429s
        *args: passed through to func
        **kwargs: passed through to func

    Returns the Response on success, or the final Exception on failure --
    callers must check ``isinstance(result, Exception)``.
    """
    max_retries = kwargs.pop("max_retries", 30)
    retry_callback = kwargs.pop("retry_callback", None)
    sleep = 2
    retry_count = 0
    while True:
        try:
            response = func(*args, **kwargs)
            response.raise_for_status()
            return response
        except (
            requests.exceptions.ConnectionError,
            requests.exceptions.HTTPError,
            requests.exceptions.Timeout,
        ) as e:
            if isinstance(e, requests.exceptions.HTTPError):
                # Non-retriable HTTP errors.
                #
                # We retry 500s just to be cautious, and because the back end
                # returns them when there are infrastructure issues. If retrying
                # some request winds up being problematic, we'll change the
                # back end to indicate that it shouldn't be retried.
                if e.response is not None and e.response.status_code in {
                    400,
                    403,
                    404,
                    409,
                }:
                    return e
            if retry_count == max_retries:
                return e
            retry_count += 1
            # Exponential backoff with up to 25% jitter.
            delay = sleep + random.random() * 0.25 * sleep
            if isinstance(e, requests.exceptions.HTTPError) and (
                e.response is not None and e.response.status_code == 429
            ):
                err_str = "Filestream rate limit exceeded, retrying in {} seconds".format(
                    delay
                )
                if retry_callback:
                    retry_callback(e.response.status_code, err_str)
                logger.info(err_str)
            else:
                pass
            logger.warning(
                "requests_with_retry encountered retryable exception: %s. func: %s, args: %s, kwargs: %s",
                e,
                func,
                args,
                kwargs,
            )
            time.sleep(delay)
            sleep *= 2
            if sleep > MAX_SLEEP_SECONDS:
                sleep = MAX_SLEEP_SECONDS
        except requests.exceptions.RequestException as e:
            error_message = "unknown error"
            try:
                # NOTE(review): ``response`` is unbound here when func() itself
                # raised; the bare Exception catch below swallows that NameError.
                error_message = response.json()["error"]  # XXX clean this up
            except Exception:
                pass
            logger.error("requests_with_retry error: {}".format(error_message))
            logger.exception(
                "requests_with_retry encountered unretryable exception: %s", e
            )
            return e
|
app.py | """ Main Kivy application """
import asyncio
import os
from logging import getLogger
from threading import Thread
from sys import platform as _platform
from tesseractXplore.settings import read_settings
# Set GL backend before any kivy modules are imported
os.environ['KIVY_GL_BACKEND'] = 'sdl2'
# Set the text-provider backend before any kivy modules are imported.
# Each provider has a "down" toggle in the display settings; the first
# enabled one wins.
settings = read_settings()
if settings['display']['pil_textprovider'] == 'down':
    os.environ['KIVY_TEXT'] = 'pil'
elif settings['display']['pygame_textprovider'] == 'down':
    os.environ['KIVY_TEXT'] = 'pygame'
elif settings['display']['pango_textprovider'] == 'down':
    os.environ['KIVY_TEXT'] = 'pango'
# TODO: Make it as setting
from kivy.clock import Clock
# Disable multitouch emulation before any other kivy modules are imported
from kivy.config import Config
Config.set('input', 'mouse', 'mouse,multitouch_on_demand')
from kivy.core.clipboard import Clipboard
from kivy.core.window import Window
from kivy.properties import ObjectProperty
from kivymd.app import MDApp
from tesseractXplore.app import alert
from tesseractXplore.app.screens import HOME_SCREEN, Root, load_screens
from tesseractXplore.tessprofiles import read_tessprofiles
from tesseractXplore.constants import (
INIT_WINDOW_POSITION,
INIT_WINDOW_SIZE,
MD_PRIMARY_PALETTE,
MD_ACCENT_PALETTE,
BACKSPACE,
ENTER,
F11, TRIGGER_DELAY,
)
from tesseractXplore.controllers import (
ImageSelectionController,
FulltextViewController,
ImageEditorController,
SettingsController,
ModelListController,
ModelSearchController,
ModelSelectionController,
ModelViewController,
TessprofilesController,
TesseractController,
DiffStdoutController,
)
from tesseractXplore.widgets import ModelListItem
logger = getLogger().getChild(__name__)
class ControllerProxy:
    """ The individual controllers need to talk to each other sometimes.
    Any such interactions go through this class so they don't talk to each other directly.
    This also just serves as documentation for these interactions so I don't lose track of them.
    """
    # Kivy ObjectProperty slots for the controller instances; they are
    # populated by init_controllers() once the screens exist.
    image_selection_controller = ObjectProperty()
    fulltext_view_controller = ObjectProperty()
    # NOTE(review): declared here as image_edit_controller, but
    # init_controllers() assigns self.image_editor_controller — confirm
    # which spelling is the intended public name.
    image_edit_controller = ObjectProperty()
    model_search_controller = ObjectProperty()
    model_selection_controller = ObjectProperty()
    model_view_controller = ObjectProperty()
    modellist_controller = ObjectProperty()
    tessprofiles_controller = ObjectProperty()
    settings_controller = ObjectProperty()
    diffstdout_controller = ObjectProperty()

    def init_controllers(self, screens):
        """Instantiate every controller with its screen's widget ids and wire
        up the cross-controller proxy methods/properties.

        :param screens: dict mapping screen name -> screen object (from load_screens())
        """
        # Init OS-specific errorcodes
        self._platform = _platform
        self.errorcodes = [1,127] if _platform in ["win32","win64"] else [127]
        # Read profile settings
        self.tessprofiles = read_tessprofiles()
        # Init controllers with references to nested screen objects
        self.settings_controller = SettingsController(screens['settings'].ids)
        # Tesseract paths come from settings and are shared via the proxy.
        self.tessdatadir = self.settings_controller.tesseract['tessdatadir']
        self.tesspath = self.settings_controller.tesseract['tesspath']
        self.image_selection_controller = ImageSelectionController(screens[HOME_SCREEN].ids)
        self.tesseract_controller = TesseractController(screens[HOME_SCREEN].ids)
        self.fulltext_view_controller = FulltextViewController(screens['fulltext'].ids)
        self.image_editor_controller = ImageEditorController(screens['imageeditor'].ids)
        self.model_selection_controller = ModelSelectionController(screens['model'].ids)
        self.model_view_controller = ModelViewController(screens['model'].ids)
        self.modellist_controller = ModelListController(screens['modellist'].ids)
        self.tessprofiles_controller = TessprofilesController(screens['tessprofiles'].ids)
        self.model_search_controller = ModelSearchController(screens['model'].ids)
        self.diffstdout_controller = DiffStdoutController(screens['diffstdout'].ids)
        # gt_search_controller = GTSearchController(screens['gt'].ids)
        # Proxy methods: re-export bound methods so other controllers can call
        # them through the app object without importing each other.
        self.is_starred = self.model_selection_controller.is_starred
        self.add_star = self.model_selection_controller.add_star
        self.select_fulltext = self.fulltext_view_controller.select_fulltext
        self.select_image = self.image_editor_controller.select_image
        self.remove_star = self.model_selection_controller.remove_star
        self.select_model = self.model_view_controller.select_model
        self.select_model_from_photo = self.image_selection_controller.select_model_from_photo
        self.update_history = self.model_selection_controller.update_history
        self.add_control_widget = self.settings_controller.add_control_widget
        # Proxy properties
        self.locale = self.settings_controller.locale
        self.username = self.settings_controller.username
        self.password = self.settings_controller.password
        # Post-init steps that require the full proxy wiring to be in place.
        self.image_selection_controller.post_init()
        self.model_selection_controller.post_init()

    def get_model_list_item(self, *args, **kwargs):
        """ Get a new :py:class:`.ModelListItem with event binding """
        item = ModelListItem(*args, **kwargs)
        self.bind_to_select_model(item)
        return item

    def bind_to_select_model(self, item):
        # If ModelListItem's disable_button is set, don't set button action
        if not item.disable_button:
            item.bind(on_release=lambda x: self.model_view_controller.select_model(x.model))
class TesseractXplore(MDApp, ControllerProxy):
    """ Manages window, theme, main screen and navigation state; other application logic is
    handled by Controller
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.bg_loop = None
        self.root = None
        self.nav_drawer = None
        self.screen_manager = None
        self.toolbar = None
        # Buffer + delayed trigger for collecting multiple files dropped at once
        self.dropped_files = []
        self.drop_trigger = Clock.create_trigger(self.process_dropped_files, TRIGGER_DELAY)

    def build(self):
        """Build the widget tree, start the background loop, init controllers
        and wire up window/keyboard events. Returns the root widget."""
        # Set color palette
        self.theme_cls.primary_palette = MD_PRIMARY_PALETTE
        self.theme_cls.accent_palette = MD_ACCENT_PALETTE
        # Create an event loop to be used by background loaders
        self.bg_loop = asyncio.new_event_loop()
        # Daemon thread so it gets killed when the app closes.
        # Fix: Thread.setDaemon() is deprecated since Python 3.10 —
        # assign the .daemon attribute instead.
        tmain = Thread(target=self.bg_loop.run_forever)
        tmain.daemon = True
        tmain.start()
        # Init screens and store references to them
        screens = load_screens()
        self.root = Root()
        ControllerProxy.init_controllers(self, screens)
        # Init screen manager and nav elements
        self.nav_drawer = self.root.ids.nav_drawer
        self.screen_manager = self.root.ids.screen_manager
        self.toolbar = self.root.ids.toolbar
        # Only the screen objects are needed here; keys are unused.
        for screen in screens.values():
            self.screen_manager.add_widget(screen)
        self.set_theme_mode()
        self.home()
        # self.switch_screen('model')
        # Set Window and theme settings
        position, left, top = INIT_WINDOW_POSITION
        Window.position = position
        Window.left = left
        Window.top = top
        Window.size = INIT_WINDOW_SIZE
        Window.bind(on_keyboard=self.on_keyboard)
        Window.bind(on_request_close=self.on_request_close)
        # on_dropfile sends a single file at a time; the first binding buffers
        # each path, the second (delayed trigger) processes the whole batch.
        Window.bind(on_dropfile=lambda _, path: self.dropped_files.append(path))
        Window.bind(on_dropfile=self.drop_trigger)
        # Preload atlases so they're immediately available in Kivy cache
        # TODO: Currently not necessary, but will be in future version
        # Image(source=f'{ATLAS_APP_ICONS}/')
        # Image(source=f'{ATLAS_TAXON_ICONS}/')
        return self.root

    def process_dropped_files(self, *args):
        """Hand the buffered drag-and-drop paths to the image selector and reset the buffer."""
        self.image_selection_controller.add_images(self.dropped_files)
        self.dropped_files = []

    def home(self, *args):
        """Switch to the home (image selection) screen."""
        self.switch_screen(HOME_SCREEN)

    def open_nav(self, *args):
        self.nav_drawer.set_state('open')

    def close_nav(self, *args):
        self.nav_drawer.set_state('close')

    def switch_screen(self, screen_name: str):
        """Switch to the named screen, saving state of the screen being left."""
        # If we're leaving a screen with stored state, save it first
        # TODO: Also save stored taxa, but needs optimization first (async, only store if changed)
        if self.screen_manager.current in ['settings']:
            self.settings_controller.save_settings()
        if screen_name == "model":
            self.model_view_controller.screen.tessdatadir.text = self.tessdatadir
        self.screen_manager.current = screen_name
        self.update_toolbar(screen_name)
        self.close_nav()

    def on_request_close(self, *args):
        """ Save any unsaved settings before exiting """
        self.settings_controller.save_settings()
        self.stop()

    def on_keyboard(self, window, key, scancode, codepoint, modifier):
        """ Handle keyboard shortcuts """
        if (modifier, key) == (['ctrl'], BACKSPACE):
            self.home()
        elif (modifier, key) == (['ctrl'], ENTER):
            self.current_screen_action()
        elif (set(modifier), codepoint) == ({'ctrl', 'shift'}, 'x'):
            self.current_screen_clear()
        elif (modifier, codepoint) == (['ctrl'], 'o'):
            self.image_selection_controller.open_native_file_chooser()
        elif (set(modifier), codepoint) == ({'ctrl', 'shift'}, 'o'):
            self.image_selection_controller.open_native_file_chooser(dirs=True)
        elif (modifier, codepoint) == (['ctrl'], 'q'):
            self.on_request_close()
        elif (modifier, codepoint) == (['ctrl'], 's'):
            self.switch_screen('settings')
        elif (modifier, codepoint) == (['ctrl'], 't'):
            self.switch_screen('model')
        elif (modifier, codepoint) == (['ctrl'], 'v'):
            self.current_screen_paste()
        # Shortcuts below only apply on the home screen.
        elif self.screen_manager.current == HOME_SCREEN:
            if (modifier, codepoint) == (['ctrl'], '+'):
                self.image_selection_controller.zoomin(None, None)
            elif (modifier, codepoint) == (['ctrl'], '-'):
                self.image_selection_controller.zoomout(None, None)
            elif key == F11:
                self.toggle_fullscreen()

    # TODO: current_screen_*() may be better organized as controller methods (inherited/overridden as needed)
    def current_screen_action(self):
        """ Run the current screen's main action """
        if self.screen_manager.current == HOME_SCREEN:
            self.tesseract_controller.recognize(None)
        elif self.screen_manager.current == 'model':
            self.model_search_controller.search()

    def current_screen_clear(self):
        """ Clear the settings on the current screen, if applicable """
        if self.screen_manager.current == HOME_SCREEN:
            self.image_selection_controller.clear()
        elif self.screen_manager.current == 'model':
            self.model_search_controller.reset_all_search_inputs()

    # TODO: Threw this together quickly, this could be cleaned up a lot
    def current_screen_paste(self):
        """Paste clipboard contents into the current screen's id fields.

        NOTE(review): model_id/gt_id are hard-coded to 0 (id extraction from
        the clipboard value is not implemented yet), so the branches below are
        currently dead code kept as a stub.
        """
        value = Clipboard.paste()
        model_id, gt_id = 0, 0
        if model_id:
            self.select_model(id=model_id)
            alert(f'Model {model_id} selected')
        if gt_id:
            # self.select_gt(id=gt_id)
            alert(f'GT {gt_id} selected')
        if self.screen_manager.current == HOME_SCREEN:
            if gt_id:
                self.image_selection_controller.screen.gt_id_input.text = str(gt_id)
                self.image_selection_controller.screen.model_id_input.text = ''
            elif model_id:
                self.image_selection_controller.screen.gt_id_input.text = ''
                self.image_selection_controller.screen.model_id_input.text = str(model_id)

    def update_toolbar(self, screen_name: str):
        """ Modify toolbar in-place so it can be shared by all screens """
        self.toolbar.title = screen_name.title().replace('_', ' ')
        if screen_name == HOME_SCREEN:
            self.toolbar.left_action_items = [['menu', self.open_nav]]
        else:
            self.toolbar.left_action_items = [["arrow-left", self.home]]
        self.toolbar.right_action_items = [
            ['border-none-variant', self.toggle_border],
            ['fullscreen', self.toggle_fullscreen],
            ['dots-vertical', self.open_settings],
        ]

    def set_theme_mode(self, switch=None, is_active: bool = None):
        """ Set light or dark themes, based on either toggle switch or settings """
        if is_active is None:
            is_active = self.settings_controller.display['dark_mode']
        self.theme_cls.theme_style = 'Dark' if is_active else 'Light'

    def toggle_border(self, *args):
        """ Enable or disable the window border, and change icon """
        # The current icon doubles as the toggle state.
        if self.toolbar.right_action_items[0][0] == 'border-all-variant':
            Window.borderless = 0
            icon = 'border-none-variant'
        else:
            Window.borderless = 1
            icon = 'border-all-variant'
        self.toolbar.right_action_items[0] = [icon, self.toggle_border]

    def toggle_fullscreen(self, *args):
        """ Enable or disable fullscreen, and change icon"""
        # Window fullscreen doesn't work with two displays
        if self.toolbar.right_action_items[1][0] == 'fullscreen-exit':
            Window.restore()
            icon = 'fullscreen'
        else:
            Window.maximize()
            icon = 'fullscreen-exit'
        self.toolbar.right_action_items[1] = [icon, self.toggle_fullscreen]
def main():
    """Application entry point: create the Kivy app and run its event loop."""
    TesseractXplore().run()


if __name__ == '__main__':
    main()
|
gpshandler.py | #!/usr/bin/python
#
# Copyright 2018 Tampere University of Technology, Pori Department
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import threading
import socket
import json
from time import sleep
import dateutil.parser # python-dateutil
from datetime import tzinfo, datetime, timezone
MSGLEN = 4096
START_WATCH = ('?WATCH={"enable":true,"json":false};').encode()
POLL = ('?POLL;').encode()
POLL_INTERVAL = 1 # location poll interval in seconds
EPOCH = datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=timezone.utc)
#
# GPS handler class
#
class GPSHandler:
    """Poll a gpsd server over TCP on a background thread and cache the
    latest location fix.

    Typical use: optionally setServerAddress(), then startListen();
    read getCurrentLocation() as needed; stopListen() when done.

    @param {Boolean} checkFix check for fix validity before accepting locations
    """

    def __init__(self, checkFix=True):
        self.__lastKnownLocation = None
        self.__stopListening = True
        self.__thread = None
        self.__lock = threading.Lock()  # guards start/stop transitions
        self.__hasFix = False
        self.__checkFix = checkFix
        self.__serverAddress = ('localhost', 2947)  # gpsd defaults

    def __del__(self):
        # Best effort: make sure the poller thread is stopped on GC.
        self.stopListen()

    #
    # @param {???} sky JSON object as returned NMEA
    # @return {Boolean} True if there was a valid Fix
    #
    def __checkSkyFix(self, sky):
        if not sky:
            self.__hasFix = False
            return False
        # Check fix status by counting used satellites. Note: with
        # gpsd/raspbian, the fix timestamp does not work reliably, making it
        # impossible to figure out when the fix was achieved.
        satellites = sky[0].get("satellites", None)
        inUse = 0
        if satellites:
            for s in satellites:
                if s.get("used", False):
                    inUse += 1
        # Assume that we have a fix when more than 1 satellite is in use.
        self.__hasFix = inUse > 1
        return self.__hasFix

    #
    # GPS information poller method
    #
    def __poll(self):
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect(self.__serverAddress)
            sock.sendall(START_WATCH)
            while not self.__stopListening:
                sock.sendall(POLL)
                data = sock.recv(MSGLEN).decode()
                for j in data.split("\n"):  # check all messages in the buffer
                    if not j:
                        continue
                    try:
                        nmeajson = json.loads(j)
                        if nmeajson.get("class", None) != "POLL":  # process only POLL responses
                            continue
                        if self.__checkFix and not self.__checkSkyFix(nmeajson.get("sky", None)):
                            continue  # if required, skip data without a valid fix
                        ref = nmeajson.get("tpv", None)
                        if not ref:
                            continue
                        ref = ref[0]
                        time = ref.get("time", None)
                        latitude = ref.get("lat", None)
                        longitude = ref.get("lon", None)
                        speed = ref.get("speed", None)
                        # Fix: compare against None explicitly. 0.0 is a valid
                        # latitude/longitude/speed but is falsy, so the old
                        # truthiness test silently dropped legitimate fixes
                        # (equator, prime meridian, stationary receiver).
                        if (latitude is not None and longitude is not None
                                and speed is not None and time is not None):
                            self.__lastKnownLocation = LocationData(latitude, longitude, speed, time)
                            break  # no need to search for other poll responses
                    except json.decoder.JSONDecodeError as ex:
                        # Ignore JSON errors; sometimes the socket contains
                        # messages that do not have all required data.
                        print(ex)
                sleep(POLL_INTERVAL)
        finally:
            sock.close()

    #
    # sets address details for the GPSD server
    #
    def setServerAddress(self, hostname, port):
        self.__serverAddress = (hostname, port)

    #
    # start listening for new GPS data, if already listening, calling this method does nothing
    #
    def startListen(self):
        with self.__lock:  # 'with' releases the lock even if Thread() raises
            if self.__thread:
                print("Already listening...")
                return
            self.__stopListening = False
            self.__thread = threading.Thread(target=self.__poll)
            self.__thread.start()

    #
    # stop listening for new GPS as soon as possible, if not listening, this does nothing
    #
    def stopListen(self):
        with self.__lock:
            self.__stopListening = True
            if not self.__thread:
                print("Not listening...")
                return
            self.__thread.join()
            self.__thread = None

    #
    # @return {LocationData} return the current location or None if current location is not known
    #
    def getCurrentLocation(self):
        if self.__lastKnownLocation and self.__hasFix:
            return self.__lastKnownLocation  # NOTE: if this creates issues with threading, lock & copy
        else:
            return None
############################## LocationData ###############################
#
# location data, as received from GPS
#
class LocationData:
    """Location data, as received from GPS.

    @param {double} latitude
    @param {double} longitude
    @param {double} speed
    @param {string} fixTime in ISO8601 format
    """

    def __init__(self, latitude, longitude, speed, fixTime):
        self.latitude = latitude
        self.longitude = longitude
        self.speed = speed
        self.__fixTime = fixTime

    #
    # @return {int} fix as UNIX timestamp (in ms)
    #
    def getFixTimestamp(self):
        dt = dateutil.parser.parse(self.__fixTime)
        # Fix: multiply before truncating so sub-second precision from the
        # ISO8601 string is preserved; the old int(...)*1000 rounded the
        # "millisecond" timestamp down to whole seconds.
        return int((dt - EPOCH).total_seconds() * 1000)
|
socket_preview.py | """
sockets for cross-task communication: start threads to communicate over sockets;
independent programs can too, because sockets are system-wide, much like fifos;
see the GUI and Internet parts of the book for more realistic socket use cases;
some socket servers may also need to talk to clients in threads or processes;
sockets pass byte strings, but can be pickled objects or encoded Unicode text;
caveat: prints in threads may need to be synchronized if their output overlaps;
"""
from socket import socket, AF_INET, SOCK_STREAM # portable socket api
port = 50008 # port number identifies socket on machine
host = 'localhost' # server and client run on same local machine here
def server():
    """Accept clients forever; echo each message back inside a wrapper string."""
    listener = socket(AF_INET, SOCK_STREAM)      # ip addresses tcp connection
    listener.bind(('', port))                    # bind to port on this machine
    listener.listen(5)                           # allow up to 5 pending clients
    while True:
        conn, addr = listener.accept()           # block until a client connects
        data = conn.recv(1024)                   # read bytes sent by this client
        conn.send(('server got: [%s]' % data).encode())  # reply back in bytes
def client(name):
    """Send *name* to the listener, print its reply, and close the connection."""
    conn = socket(AF_INET, SOCK_STREAM)
    conn.connect((host, port))        # connect to the server's socket port
    conn.send(name.encode())          # send our name as bytes
    reply = conn.recv(1024)           # up to 1024 bytes per message
    conn.close()
    print('client got: [%s]' % reply)
if __name__ == '__main__':
    # Self-test: run the server in a daemon thread, then spawn five clients
    # that each send a message and print the echoed reply.
    from threading import Thread
    sthread = Thread(target=server)
    sthread.daemon = True    # don't wait for server thread
    sthread.start()          # do wait for children to exit
    for i in range(5):
        Thread(target=client, args=('client%s' % i,)).start()
|
__init__.py | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import threading
import time
from collections import deque
from multiprocessing import Lock
from jinja2.exceptions import UndefinedError
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleParserError, AnsibleUndefinedVariable
from ansible.executor import action_write_locks
from ansible.executor.process.worker import WorkerProcess
from ansible.executor.task_result import TaskResult
from ansible.inventory.host import Host
from ansible.inventory.group import Group
from ansible.module_utils.six.moves import queue as Queue
from ansible.module_utils.six import iteritems, string_types
from ansible.module_utils._text import to_text
from ansible.playbook.helpers import load_list_of_blocks
from ansible.playbook.included_file import IncludedFile
from ansible.playbook.task_include import TaskInclude
from ansible.playbook.role_include import IncludeRole
from ansible.plugins import action_loader, connection_loader, filter_loader, lookup_loader, module_loader, test_loader
from ansible.template import Templar
from ansible.vars import combine_vars, strip_internal_keys
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
__all__ = ['StrategyBase']
class StrategySentinel:
    """Marker type placed on the results queue (see StrategyBase.cleanup) to
    tell the background results thread (results_thread_main) to exit."""
    pass
# TODO: this should probably be in the plugins/__init__.py, with
# a smarter mechanism to set all of the attributes based on
# the loaders created there
class SharedPluginLoaderObj:
    '''
    A simple object to make passing the various plugin loaders to
    the forked processes over the queue easier
    '''
    def __init__(self):
        # Snapshot the module-level plugin loaders so the whole set can be
        # handed to worker processes as a single object.
        self.action_loader = action_loader
        self.connection_loader = connection_loader
        self.filter_loader = filter_loader
        self.test_loader = test_loader
        self.lookup_loader = lookup_loader
        self.module_loader = module_loader
_sentinel = StrategySentinel()
def results_thread_main(strategy):
    """Background loop: drain strategy._final_q into strategy._results.

    Runs until a StrategySentinel arrives on the queue (put there by
    StrategyBase.cleanup) or the queue breaks (IOError/EOFError, e.g. on an
    abort). Appends are done under strategy._results_lock, which is shared
    with _process_pending_results.
    """
    while True:
        try:
            result = strategy._final_q.get()
            if isinstance(result, StrategySentinel):
                break
            # Fix: use a 'with' block so the lock is released even if the
            # append raises; the bare acquire()/release() pair would leak
            # the lock on error and deadlock the main thread.
            with strategy._results_lock:
                strategy._results.append(result)
        except (IOError, EOFError):
            break
        except Queue.Empty:
            # Blocking get() shouldn't raise Empty, but tolerate it.
            pass
class StrategyBase:
'''
This is the base class for strategy plugins, which contains some common
code useful to all strategies like running handlers, cleanup actions, etc.
'''
def __init__(self, tqm):
    """Store references to the TaskQueueManager's shared state and start the
    background results-reader thread (results_thread_main).

    :param tqm: the TaskQueueManager driving this strategy
    """
    self._tqm = tqm
    self._inventory = tqm.get_inventory()
    self._workers = tqm.get_workers()
    self._notified_handlers = tqm._notified_handlers
    self._listening_handlers = tqm._listening_handlers
    self._variable_manager = tqm.get_variable_manager()
    self._loader = tqm.get_loader()
    self._final_q = tqm._final_q
    self._step = getattr(tqm._options, 'step', False)
    self._diff = getattr(tqm._options, 'diff', False)
    # Backwards compat: self._display isn't really needed, just import the global display and use that.
    self._display = display
    # internal counters
    self._pending_results = 0
    self._cur_worker = 0
    # this dictionary is used to keep track of hosts that have
    # outstanding tasks still in queue
    self._blocked_hosts = dict()
    # Results deque and its lock, shared with the reader thread below.
    self._results = deque()
    self._results_lock = threading.Condition(threading.Lock())
    # create the result processing thread for reading results in the background
    self._results_thread = threading.Thread(target=results_thread_main, args=(self,))
    self._results_thread.daemon = True
    self._results_thread.start()
def cleanup(self):
    """Unblock the background results thread with the sentinel and join it."""
    self._final_q.put(_sentinel)
    self._results_thread.join()
def run(self, iterator, play_context, result=0):
    """Finalize host iteration, run handlers, and compute the play's
    overall return code.

    :param result: starting result code, OR-ed with any handler error codes
    :return: one of self._tqm's RUN_* codes (or the accumulated result)
    """
    # execute one more pass through the iterator without peeking, to
    # make sure that all of the hosts are advanced to their final task.
    # This should be safe, as everything should be ITERATING_COMPLETE by
    # this point, though the strategy may not advance the hosts itself.
    [iterator.get_next_task_for_host(host) for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
    # save the failed/unreachable hosts, as the run_handlers()
    # method will clear that information during its execution
    failed_hosts = iterator.get_failed_hosts()
    unreachable_hosts = self._tqm._unreachable_hosts.keys()
    display.debug("running handlers")
    handler_result = self.run_handlers(iterator, play_context)
    # run_handlers returns either a bool (False = error) or a result code
    # to be OR-ed into the accumulated result.
    if isinstance(handler_result, bool) and not handler_result:
        result |= self._tqm.RUN_ERROR
    elif not handler_result:
        result |= handler_result
    # now update with the hosts (if any) that failed or were
    # unreachable during the handler execution phase
    failed_hosts = set(failed_hosts).union(iterator.get_failed_hosts())
    unreachable_hosts = set(unreachable_hosts).union(self._tqm._unreachable_hosts.keys())
    # return the appropriate code, depending on the status hosts after the run
    if not isinstance(result, bool) and result != self._tqm.RUN_OK:
        return result
    elif len(unreachable_hosts) > 0:
        return self._tqm.RUN_UNREACHABLE_HOSTS
    elif len(failed_hosts) > 0:
        return self._tqm.RUN_FAILED_HOSTS
    else:
        return self._tqm.RUN_OK
def get_hosts_remaining(self, play):
return [host for host in self._inventory.get_hosts(play.hosts)
if host.name not in self._tqm._failed_hosts and host.name not in self._tqm._unreachable_hosts]
def get_failed_hosts(self, play):
return [host for host in self._inventory.get_hosts(play.hosts) if host.name in self._tqm._failed_hosts]
def add_tqm_variables(self, vars, play):
'''
Base class method to add extra variables/information to the list of task
vars sent through the executor engine regarding the task queue manager state.
'''
vars['ansible_current_hosts'] = [h.name for h in self.get_hosts_remaining(play)]
vars['ansible_failed_hosts'] = [h.name for h in self.get_failed_hosts(play)]
def _queue_task(self, host, task, task_vars, play_context):
    ''' handles queueing the task up to be sent to a worker '''
    display.debug("entering _queue_task() for %s/%s" % (host.name, task.action))
    # Add a write lock for tasks.
    # Maybe this should be added somewhere further up the call stack but
    # this is the earliest in the code where we have task (1) extracted
    # into its own variable and (2) there's only a single code path
    # leading to the module being run. This is called by three
    # functions: __init__.py::_do_handler_run(), linear.py::run(), and
    # free.py::run() so we'd have to add to all three to do it there.
    # The next common higher level is __init__.py::run() and that has
    # tasks inside of play_iterator so we'd have to extract them to do it
    # there.
    if task.action not in action_write_locks.action_write_locks:
        display.debug('Creating lock for %s' % task.action)
        action_write_locks.action_write_locks[task.action] = Lock()
    # and then queue the new task
    try:
        # create a dummy object with plugin loaders set as an easier
        # way to share them with the forked processes
        shared_loader_obj = SharedPluginLoaderObj()
        queued = False
        starting_worker = self._cur_worker
        # Round-robin over worker slots until a free (empty or dead) one
        # accepts the task; if a full cycle finds none, sleep briefly.
        while True:
            (worker_prc, rslt_q) = self._workers[self._cur_worker]
            if worker_prc is None or not worker_prc.is_alive():
                worker_prc = WorkerProcess(self._final_q, task_vars, host, task, play_context, self._loader, self._variable_manager, shared_loader_obj)
                self._workers[self._cur_worker][0] = worker_prc
                worker_prc.start()
                display.debug("worker is %d (out of %d available)" % (self._cur_worker+1, len(self._workers)))
                queued = True
            self._cur_worker += 1
            if self._cur_worker >= len(self._workers):
                self._cur_worker = 0
            if queued:
                break
            elif self._cur_worker == starting_worker:
                time.sleep(0.0001)
        self._pending_results += 1
    except (EOFError, IOError, AssertionError) as e:
        # most likely an abort
        display.debug("got an error while queuing: %s" % e)
        return
    display.debug("exiting _queue_task() for %s/%s" % (host.name, task.action))
def get_task_hosts(self, iterator, task_host, task):
if task.run_once:
host_list = [host for host in self._inventory.get_hosts(iterator._play.hosts) if host.name not in self._tqm._unreachable_hosts]
else:
host_list = [task_host]
return host_list
def get_delegated_hosts(self, result, task):
host_name = result.get('_ansible_delegated_vars', {}).get('ansible_host', None)
if host_name is not None:
actual_host = self._inventory.get_host(host_name)
if actual_host is None:
actual_host = Host(name=host_name)
else:
actual_host = Host(name=task.delegate_to)
return [actual_host]
def _process_pending_results(self, iterator, one_pass=False, max_passes=None):
    '''
    Reads results off the final queue and takes appropriate action
    based on the result (executing callbacks, updating state, etc.).

    :arg iterator: play iterator used to resolve original tasks and to
        mark hosts failed.
    :kwarg one_pass: when True, process at most one result and return.
    :kwarg max_passes: when set, process at most this many results.
    :returns: list of TaskResult objects that were processed.
    '''
    ret_results = []

    def get_original_host(host_name):
        # Map the (possibly serialized) host name from the worker back to
        # the master inventory's Host object, preferring the cache.
        host_name = to_text(host_name)
        if host_name in self._inventory._hosts_cache:
            return self._inventory._hosts_cache[host_name]
        else:
            return self._inventory.get_host(host_name)

    def search_handler_blocks_by_name(handler_name, handler_blocks):
        # Return the first handler task whose templated name matches
        # handler_name, or None when no handler matches.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_task.name:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=handler_task)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    try:
                        # first we check with the full result of get_name(), which may
                        # include the role name (if the handler is from a role). If that
                        # is not found, we resort to the simple name field, which doesn't
                        # have anything extra added to it.
                        target_handler_name = templar.template(handler_task.name)
                        if target_handler_name == handler_name:
                            return handler_task
                        else:
                            target_handler_name = templar.template(handler_task.get_name())
                            if target_handler_name == handler_name:
                                return handler_task
                    except (UndefinedError, AnsibleUndefinedVariable):
                        # We skip this handler due to the fact that it may be using
                        # a variable in the name that was conditionally included via
                        # set_fact or some other method, and we don't want to error
                        # out unnecessarily
                        continue
        return None

    def search_handler_blocks_by_uuid(handler_uuid, handler_blocks):
        # Exact lookup of a handler task by its internal uuid.
        for handler_block in handler_blocks:
            for handler_task in handler_block.block:
                if handler_uuid == handler_task._uuid:
                    return handler_task
        return None

    def parent_handler_match(target_handler, handler_name):
        # Walk up the include chain: True if any ancestor include (task
        # include or role include) has a templated name matching handler_name.
        if target_handler:
            if isinstance(target_handler, (TaskInclude, IncludeRole)):
                try:
                    handler_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, task=target_handler)
                    templar = Templar(loader=self._loader, variables=handler_vars)
                    target_handler_name = templar.template(target_handler.name)
                    if target_handler_name == handler_name:
                        return True
                    else:
                        target_handler_name = templar.template(target_handler.get_name())
                        if target_handler_name == handler_name:
                            return True
                except (UndefinedError, AnsibleUndefinedVariable):
                    # undefined vars in the name are not fatal here; treat as no match
                    pass
            return parent_handler_match(target_handler._parent, handler_name)
        else:
            return False

    cur_pass = 0
    while True:
        try:
            # the results list is shared with workers, so guard the pop
            self._results_lock.acquire()
            task_result = self._results.pop()
        except IndexError:
            # nothing left to process
            break
        finally:
            self._results_lock.release()

        # get the original host and task. We then assign them to the TaskResult for use in callbacks/etc.
        original_host = get_original_host(task_result._host)
        found_task = iterator.get_original_task(original_host, task_result._task)
        original_task = found_task.copy(exclude_parent=True, exclude_tasks=True)
        original_task._parent = found_task._parent
        original_task.from_attrs(task_result._task_fields)
        task_result._host = original_host
        task_result._task = original_task

        # get the correct loop var for use later
        # NOTE(review): loop_var is computed but not referenced later in
        # this method as shown -- confirm whether it can be removed
        if original_task.loop_control:
            loop_var = original_task.loop_control.loop_var or 'item'
        else:
            loop_var = 'item'

        # send callbacks for 'non final' results
        if '_ansible_retry' in task_result._result:
            self._tqm.send_callback('v2_runner_retry', task_result)
            continue
        elif '_ansible_item_result' in task_result._result:
            if task_result.is_failed() or task_result.is_unreachable():
                self._tqm.send_callback('v2_runner_item_on_failed', task_result)
            elif task_result.is_skipped():
                self._tqm.send_callback('v2_runner_item_on_skipped', task_result)
            else:
                if 'diff' in task_result._result:
                    if self._diff:
                        self._tqm.send_callback('v2_on_file_diff', task_result)
                self._tqm.send_callback('v2_runner_item_on_ok', task_result)
            continue

        if original_task.register:
            # store the sanitized result under the register name as a
            # non-persistent fact on each host this task ran for
            host_list = self.get_task_hosts(iterator, original_host, original_task)

            clean_copy = strip_internal_keys(task_result._result)
            if 'invocation' in clean_copy:
                del clean_copy['invocation']

            for target_host in host_list:
                self._variable_manager.set_nonpersistent_facts(target_host, {original_task.register: clean_copy})

        # all host status messages contain 2 entries: (msg, task_result)
        role_ran = False
        if task_result.is_failed():
            role_ran = True
            ignore_errors = original_task.ignore_errors
            if not ignore_errors:
                display.debug("marking %s as failed" % original_host.name)
                if original_task.run_once:
                    # if we're using run_once, we have to fail every host here
                    for h in self._inventory.get_hosts(iterator._play.hosts):
                        if h.name not in self._tqm._unreachable_hosts:
                            state, _ = iterator.get_next_task_for_host(h, peek=True)
                            iterator.mark_host_failed(h)
                            state, new_task = iterator.get_next_task_for_host(h, peek=True)
                else:
                    iterator.mark_host_failed(original_host)

                # increment the failed count for this host
                self._tqm._stats.increment('failures', original_host.name)

                # grab the current state and if we're iterating on the rescue portion
                # of a block then we save the failed task in a special var for use
                # within the rescue/always
                state, _ = iterator.get_next_task_for_host(original_host, peek=True)

                if iterator.is_failed(original_host) and state and state.run_state == iterator.ITERATING_COMPLETE:
                    self._tqm._failed_hosts[original_host.name] = True

                if state and state.run_state == iterator.ITERATING_RESCUE:
                    self._variable_manager.set_nonpersistent_facts(
                        original_host,
                        dict(
                            ansible_failed_task=original_task.serialize(),
                            ansible_failed_result=task_result._result,
                        ),
                    )
            else:
                # failure is ignored, so it counts as an 'ok' for stats purposes
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)
            self._tqm.send_callback('v2_runner_on_failed', task_result, ignore_errors=ignore_errors)
        elif task_result.is_unreachable():
            self._tqm._unreachable_hosts[original_host.name] = True
            iterator._play._removed_hosts.append(original_host.name)
            self._tqm._stats.increment('dark', original_host.name)
            self._tqm.send_callback('v2_runner_on_unreachable', task_result)
        elif task_result.is_skipped():
            self._tqm._stats.increment('skipped', original_host.name)
            self._tqm.send_callback('v2_runner_on_skipped', task_result)
        else:
            role_ran = True

            if original_task.loop:
                # this task had a loop, and has more than one result, so
                # loop over all of them instead of a single result
                result_items = task_result._result.get('results', [])
            else:
                result_items = [ task_result._result ]

            for result_item in result_items:
                if '_ansible_notify' in result_item:
                    if task_result.is_changed():
                        # The shared dictionary for notified handlers is a proxy, which
                        # does not detect when sub-objects within the proxy are modified.
                        # So, per the docs, we reassign the list so the proxy picks up and
                        # notifies all other threads
                        for handler_name in result_item['_ansible_notify']:
                            found = False
                            # Find the handler using the above helper. First we look up the
                            # dependency chain of the current task (if it's from a role), otherwise
                            # we just look through the list of handlers in the current play/all
                            # roles and use the first one that matches the notify name
                            target_handler = search_handler_blocks_by_name(handler_name, iterator._play.handlers)
                            if target_handler is not None:
                                found = True
                                if original_host not in self._notified_handlers[target_handler._uuid]:
                                    self._notified_handlers[target_handler._uuid].append(original_host)
                                    # FIXME: should this be a callback?
                                    display.vv("NOTIFIED HANDLER %s" % (handler_name,))
                            else:
                                # As there may be more than one handler with the notified name as the
                                # parent, so we just keep track of whether or not we found one at all
                                for target_handler_uuid in self._notified_handlers:
                                    target_handler = search_handler_blocks_by_uuid(target_handler_uuid, iterator._play.handlers)
                                    if target_handler and parent_handler_match(target_handler, handler_name):
                                        found = True
                                        if original_host not in self._notified_handlers[target_handler._uuid]:
                                            self._notified_handlers[target_handler._uuid].append(original_host)
                                            display.vv("NOTIFIED HANDLER %s" % (target_handler.get_name(),))

                            # handlers can also subscribe via 'listen'; notify those too
                            if handler_name in self._listening_handlers:
                                for listening_handler_uuid in self._listening_handlers[handler_name]:
                                    listening_handler = search_handler_blocks_by_uuid(listening_handler_uuid, iterator._play.handlers)
                                    if listening_handler is not None:
                                        found = True
                                    else:
                                        continue
                                    if original_host not in self._notified_handlers[listening_handler._uuid]:
                                        self._notified_handlers[listening_handler._uuid].append(original_host)
                                        display.vv("NOTIFIED HANDLER %s" % (listening_handler.get_name(),))

                            # and if none were found, then we raise an error
                            if not found:
                                msg = ("The requested handler '%s' was not found in either the main handlers list nor in the listening "
                                       "handlers list" % handler_name)
                                if C.ERROR_ON_MISSING_HANDLER:
                                    raise AnsibleError(msg)
                                else:
                                    display.warning(msg)

                if 'add_host' in result_item:
                    # this task added a new host (add_host module)
                    new_host_info = result_item.get('add_host', dict())
                    self._add_host(new_host_info, iterator)

                elif 'add_group' in result_item:
                    # this task added a new group (group_by module)
                    self._add_group(original_host, result_item)

                if 'ansible_facts' in result_item:
                    # if delegated fact and we are delegating facts, we need to change target host for them
                    if original_task.delegate_to is not None and original_task.delegate_facts:
                        host_list = self.get_delegated_hosts(result_item, original_task)
                    else:
                        host_list = self.get_task_hosts(iterator, original_host, original_task)

                    if original_task.action == 'include_vars':
                        for (var_name, var_value) in iteritems(result_item['ansible_facts']):
                            # find the host we're actually referring too here, which may
                            # be a host that is not really in inventory at all
                            for target_host in host_list:
                                self._variable_manager.set_host_variable(target_host, var_name, var_value)
                    else:
                        for target_host in host_list:
                            # set_fact results are non-persistent; everything else
                            # is stored as regular host facts
                            if original_task.action == 'set_fact':
                                self._variable_manager.set_nonpersistent_facts(target_host, result_item['ansible_facts'].copy())
                            else:
                                self._variable_manager.set_host_facts(target_host, result_item['ansible_facts'].copy())

                if 'ansible_stats' in result_item and 'data' in result_item['ansible_stats'] and result_item['ansible_stats']['data']:
                    # custom stats: per-host by default unless per_host is explicitly false
                    if 'per_host' not in result_item['ansible_stats'] or result_item['ansible_stats']['per_host']:
                        host_list = self.get_task_hosts(iterator, original_host, original_task)
                    else:
                        host_list = [None]

                    data = result_item['ansible_stats']['data']
                    aggregate = 'aggregate' in result_item['ansible_stats'] and result_item['ansible_stats']['aggregate']
                    for myhost in host_list:
                        for k in data.keys():
                            if aggregate:
                                self._tqm._stats.update_custom_stats(k, data[k], myhost)
                            else:
                                self._tqm._stats.set_custom_stats(k, data[k], myhost)

            if 'diff' in task_result._result:
                if self._diff:
                    self._tqm.send_callback('v2_on_file_diff', task_result)

            if original_task.action not in ['include', 'include_role']:
                self._tqm._stats.increment('ok', original_host.name)
                if 'changed' in task_result._result and task_result._result['changed']:
                    self._tqm._stats.increment('changed', original_host.name)

            # finally, send the ok for this task
            self._tqm.send_callback('v2_runner_on_ok', task_result)

        self._pending_results -= 1
        if original_host.name in self._blocked_hosts:
            del self._blocked_hosts[original_host.name]

        # If this is a role task, mark the parent role as being run (if
        # the task was ok or failed, but not skipped or unreachable)
        if original_task._role is not None and role_ran:  # TODO: and original_task.action != 'include_role':?
            # lookup the role in the ROLE_CACHE to make sure we're dealing
            # with the correct object and mark it as executed
            for (entry, role_obj) in iteritems(iterator._play.ROLE_CACHE[original_task._role._role_name]):
                if role_obj._uuid == original_task._role._uuid:
                    role_obj._had_task_run[original_host.name] = True

        ret_results.append(task_result)

        # `and` binds tighter than `or`: stop after one result when one_pass,
        # or when the max_passes budget is exhausted
        if one_pass or max_passes is not None and (cur_pass+1) >= max_passes:
            break

        cur_pass += 1

    return ret_results
def _wait_on_pending_results(self, iterator):
    '''
    Wait for the shared counter to drop to zero, using a short sleep
    between checks to ensure we don't spin lock
    '''
    collected = []
    display.debug("waiting for pending results...")
    while self._pending_results > 0 and not self._tqm._terminated:
        # a dead worker means results may never arrive; bail out loudly
        if self._tqm.has_dead_workers():
            raise AnsibleError("A worker was found in a dead state")
        collected.extend(self._process_pending_results(iterator))
        # still waiting on more results: back off briefly before polling again
        if self._pending_results > 0:
            time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
    display.debug("no more pending results, returning what we have")
    return collected
def _add_host(self, host_info, iterator):
    '''
    Helper function to add a new host to inventory based on a task result.

    host_info is the dict produced by the add_host module; it may carry
    'host_name', 'host_vars', and 'groups' keys. Mutates the shared
    inventory in place and invalidates the relevant caches afterwards.
    '''
    host_name = host_info.get('host_name')

    # Check if host in inventory, add if not
    new_host = self._inventory.get_host(host_name)
    if not new_host:
        new_host = Host(name=host_name)
        self._inventory._hosts_cache[host_name] = new_host
        self._inventory.get_host_vars(new_host)

        # every host belongs to the implicit 'all' group
        allgroup = self._inventory.get_group('all')
        allgroup.add_host(new_host)

    # Set/update the vars for this host
    new_host.vars = combine_vars(new_host.vars, self._inventory.get_host_vars(new_host))
    new_host.vars = combine_vars(new_host.vars, host_info.get('host_vars', dict()))

    new_groups = host_info.get('groups', [])
    for group_name in new_groups:
        if not self._inventory.get_group(group_name):
            # group doesn't exist yet; create it and load its vars
            new_group = Group(group_name)
            self._inventory.add_group(new_group)
            self._inventory.get_group_vars(new_group)
            new_group.vars = self._inventory.get_group_variables(group_name)
        else:
            new_group = self._inventory.get_group(group_name)

        new_group.add_host(new_host)

        # add this host to the group cache
        if self._inventory.groups is not None:
            if group_name in self._inventory.groups:
                # NOTE(review): membership is tested against the Host object
                # but the host *name* string is appended -- confirm that
                # downstream consumers of .hosts expect this mixed content
                if new_host not in self._inventory.get_group(group_name).hosts:
                    self._inventory.get_group(group_name).hosts.append(new_host.name)

    # clear pattern caching completely since it's unpredictable what
    # patterns may have referenced the group
    self._inventory.clear_pattern_cache()

    # clear cache of group dict, which is used in magic host variables
    self._inventory.clear_group_dict_cache()

    # also clear the hostvar cache entry for the given play, so that
    # the new hosts are available if hostvars are referenced
    self._variable_manager.invalidate_hostvars_cache(play=iterator._play)
def _add_group(self, host, result_item):
    '''
    Helper function to add a group (if it does not exist), and to assign the
    specified host to that group.
    '''
    changed = False

    # the host argument comes from the executor side (a serialized clone),
    # so resolve it back to the real Host object in the master inventory
    real_host = self._inventory.get_host(host.name)

    group_name = result_item.get('add_group')
    new_group = self._inventory.get_group(group_name)
    if not new_group:
        # group is new: create it, register it, load its vars, and hang it
        # off the implicit 'all' group so it sits in the hierarchy
        new_group = Group(name=group_name)
        self._inventory.add_group(new_group)
        new_group.vars = self._inventory.get_group_vars(new_group)
        self._inventory.get_group('all').add_child_group(new_group)
        changed = True

    if group_name not in host.get_groups():
        new_group.add_host(real_host)
        changed = True

    if changed:
        # clear cache of group dict, which is used in magic host variables
        self._inventory.clear_group_dict_cache()

    return changed
def _load_included_file(self, included_file, iterator, is_handler=False):
    '''
    Loads an included YAML file of tasks, applying the optional set of variables.

    On any AnsibleError the including hosts are marked failed (with
    callbacks and stats) and an empty list is returned; on success the
    parsed list of blocks is returned and v2_playbook_on_include is fired.
    '''
    display.debug("loading included file: %s" % included_file._filename)
    try:
        data = self._loader.load_from_file(included_file._filename)
        if data is None:
            # an empty file includes nothing
            return []
        elif not isinstance(data, list):
            raise AnsibleError("included task files must contain a list of tasks")

        # work on a copy of the include task so the original is untouched
        ti_copy = included_file._task.copy()
        temp_vars = ti_copy.vars.copy()
        temp_vars.update(included_file._args)
        # pop tags out of the include args, if they were specified there, and assign
        # them to the include. If the include already had tags specified, we raise an
        # error so that users know not to specify them both ways
        tags = included_file._task.vars.pop('tags', [])
        if isinstance(tags, string_types):
            tags = tags.split(',')
        if len(tags) > 0:
            if len(included_file._task.tags) > 0:
                raise AnsibleParserError("Include tasks should not specify tags in more than one way (both via args and directly on the task). "
                                         "Mixing tag specify styles is prohibited for whole import hierarchy, not only for single import statement",
                                         obj=included_file._task._ds)
            display.deprecated("You should not specify tags in the include parameters. All tags should be specified using the task-level option")
            included_file._task.tags = tags

        ti_copy.vars = temp_vars

        block_list = load_list_of_blocks(
            data,
            play=iterator._play,
            parent_block=None,
            task_include=ti_copy,
            role=included_file._task._role,
            use_handlers=is_handler,
            loader=self._loader,
            variable_manager=self._variable_manager,
        )

        # since we skip incrementing the stats when the task result is
        # first processed, we do so now for each host in the list
        for host in included_file._hosts:
            self._tqm._stats.increment('ok', host.name)

    except AnsibleError as e:
        # mark all of the hosts including this file as failed, send callbacks,
        # and increment the stats for this host
        for host in included_file._hosts:
            tr = TaskResult(host=host, task=included_file._task, return_data=dict(failed=True, reason=to_text(e)))
            iterator.mark_host_failed(host)
            self._tqm._failed_hosts[host.name] = True
            self._tqm._stats.increment('failures', host.name)
            self._tqm.send_callback('v2_runner_on_failed', tr)
        return []

    # finally, send the callback and return the list of blocks loaded
    self._tqm.send_callback('v2_playbook_on_include', included_file)
    display.debug("done processing included file")
    return block_list
def run_handlers(self, iterator, play_context):
    '''
    Runs handlers on those hosts which have been notified.
    '''
    status = self._tqm.RUN_OK

    for handler_block in iterator._play.handlers:
        # FIXME: handlers need to support the rescue/always portions of blocks too,
        #        but this may take some work in the iterator and gets tricky when
        #        we consider the ability of meta tasks to flush handlers
        for handler in handler_block.block:
            # skip handlers that nobody notified (missing or empty host list)
            if not self._notified_handlers.get(handler._uuid):
                continue
            status = self._do_handler_run(handler, handler.get_name(), iterator=iterator, play_context=play_context)
            if not status:
                break
    return status
def _do_handler_run(self, handler, handler_name, iterator, play_context, notified_hosts=None):
    '''
    Queue one handler task on every notified host, wait for the results,
    and recursively run any handlers brought in via includes. Returns the
    (falsy-on-failure) result of the run and clears the notification list
    for this handler.
    '''
    # FIXME: need to use iterator.get_failed_hosts() instead?
    #if not len(self.get_hosts_remaining(iterator._play)):
    #    self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
    #    result = False
    #    break

    # temporarily swap in the display name so the callback shows the
    # (possibly role-qualified) handler name, then restore it
    saved_name = handler.name
    handler.name = handler_name
    self._tqm.send_callback('v2_playbook_on_handler_task_start', handler)
    handler.name = saved_name

    if notified_hosts is None:
        notified_hosts = self._notified_handlers[handler._uuid]

    run_once = False
    try:
        action = action_loader.get(handler.action, class_only=True)
        if handler.run_once or getattr(action, 'BYPASS_HOST_LOOP', False):
            run_once = True
    except KeyError:
        # we don't care here, because the action may simply not have a
        # corresponding action plugin
        pass

    host_results = []
    for host in notified_hosts:
        # only run on hosts that haven't already triggered this handler and
        # that aren't failed (unless force_handlers is in effect)
        if not handler.has_triggered(host) and (not iterator.is_failed(host) or play_context.force_handlers):
            task_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=host, task=handler)
            self.add_tqm_variables(task_vars, play=iterator._play)
            self._queue_task(host, handler, task_vars, play_context)
            if run_once:
                break

    # collect the results from the handler run
    host_results = self._wait_on_pending_results(iterator)

    try:
        included_files = IncludedFile.process_include_results(
            host_results,
            self._tqm,
            iterator=iterator,
            inventory=self._inventory,
            loader=self._loader,
            variable_manager=self._variable_manager
        )
    except AnsibleError as e:
        return False

    result = True
    if len(included_files) > 0:
        for included_file in included_files:
            try:
                new_blocks = self._load_included_file(included_file, iterator=iterator, is_handler=True)
                # for every task in each block brought in by the include, add the list
                # of hosts which included the file to the notified_handlers dict
                for block in new_blocks:
                    iterator._play.handlers.append(block)
                    iterator.cache_block_tasks(block)
                    for task in block.block:
                        result = self._do_handler_run(
                            handler=task,
                            handler_name=None,
                            iterator=iterator,
                            play_context=play_context,
                            notified_hosts=included_file._hosts[:],
                        )
                        if not result:
                            break
            except AnsibleError as e:
                # a bad include fails only the hosts that included it; keep
                # processing the remaining included files
                for host in included_file._hosts:
                    iterator.mark_host_failed(host)
                    self._tqm._failed_hosts[host.name] = True
                display.warning(str(e))
                continue

    # wipe the notification list
    self._notified_handlers[handler._uuid] = []
    display.debug("done running handlers, result is: %s" % result)
    return result
def _take_step(self, task, host=None):
    '''
    Prompt the user in step mode; return True when the task should run.
    Answering 'c'/'continue' also disables step mode for the rest of the run.
    '''
    prompt_text = u'Perform task: %s ' % task
    if host:
        prompt_text += u'on %s ' % host
    prompt_text += u'(N)o/(y)es/(c)ontinue: '

    answer = display.prompt(prompt_text).lower()
    if answer in ('y', 'yes'):
        display.debug("User ran task")
        should_run = True
    elif answer in ('c', 'continue'):
        display.debug("User ran task and canceled step mode")
        self._step = False
        should_run = True
    else:
        # anything else (including the default empty answer) skips the task
        display.debug("User skipped task")
        should_run = False

    display.banner(prompt_text)
    return should_run
def _execute_meta(self, task, play_context, iterator, target_host):
    '''
    Execute one 'meta' task (noop, flush_handlers, refresh_inventory,
    clear_facts, clear_host_errors, end_play, reset_connection) and return
    a single-element list with its TaskResult for target_host.
    '''
    # meta tasks store their args in the _raw_params field of args,
    # since they do not use k=v pairs, so get that
    meta_action = task.args.get('_raw_params')

    # FIXME(s):
    # * raise an error or show a warning when a conditional is used
    #   on a meta task that doesn't support them

    def _evaluate_conditional(h):
        # evaluate the task's when-clause against host h's variables
        all_vars = self._variable_manager.get_vars(loader=self._loader, play=iterator._play, host=h, task=task)
        templar = Templar(loader=self._loader, variables=all_vars)
        return task.evaluate_conditional(templar, all_vars)

    skipped = False
    msg = ''
    if meta_action == 'noop':
        # FIXME: issue a callback for the noop here?
        msg="noop"
    elif meta_action == 'flush_handlers':
        self.run_handlers(iterator, play_context)
        msg = "ran handlers"
    elif meta_action == 'refresh_inventory':
        self._inventory.refresh_inventory()
        msg = "inventory successfully refreshed"
    elif meta_action == 'clear_facts':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._variable_manager.clear_facts(host)
            msg = "facts cleared"
        else:
            skipped = True
    elif meta_action == 'clear_host_errors':
        if _evaluate_conditional(target_host):
            for host in self._inventory.get_hosts(iterator._play.hosts):
                self._tqm._failed_hosts.pop(host.name, False)
                self._tqm._unreachable_hosts.pop(host.name, False)
                iterator._host_states[host.name].fail_state = iterator.FAILED_NONE
            msg="cleared host errors"
        else:
            skipped = True
    elif meta_action == 'end_play':
        if _evaluate_conditional(target_host):
            # end the play for every reachable host
            for host in self._inventory.get_hosts(iterator._play.hosts):
                if not host.name in self._tqm._unreachable_hosts:
                    iterator._host_states[host.name].run_state = iterator.ITERATING_COMPLETE
            msg="ending play"
        # NOTE(review): unlike clear_facts/clear_host_errors, a false
        # conditional here leaves skipped=False -- confirm intent
    elif meta_action == 'reset_connection':
        connection = connection_loader.get(play_context.connection, play_context, os.devnull)
        if connection:
            connection.reset()
            msg= 'reset connection'
        else:
            msg= 'no connection, nothing to reset'
    else:
        raise AnsibleError("invalid meta action requested: %s" % meta_action, obj=task._ds)

    result = { 'msg': msg }
    if skipped:
        result['skipped'] = True
    else:
        result['changed'] = False

    display.vv("META: %s" % msg)

    return [TaskResult(target_host, task, result)]
def get_hosts_left(self, iterator):
    ''' returns list of available hosts for this iterator by filtering out unreachables '''
    unreachable = self._tqm._unreachable_hosts
    return [
        host
        for host in self._inventory.get_hosts(iterator._play.hosts, order=iterator._play.order)
        if host.name not in unreachable
    ]
|
main.py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import socket
import sys
import pickle
import random
import time
from threading import Thread
def toAddr(s):
    """Parse a "host:port" string into an (host, int(port)) address tuple."""
    host, port = s.split(":")[:2]
    return host, int(port)
class Node:
    # One participant in a round-based UDP flooding protocol: each node
    # gossips the set of values it has seen each round and stops when a
    # round brings no change in the number of senders heard from, then
    # picks the smallest value seen.
    def __init__(self, index, value, crash, allnode):
        # bind this node's own UDP socket; a short timeout bounds how long
        # recv_news waits for a round's messages
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_DGRAM);
        self.skt.bind(allnode[index])
        self.skt.settimeout(0.5)
        self.index = index          # this node's position in allnode (0-based)
        self.crash = crash          # round number at which to simulate a crash (-1 = never)
        self.round = 1              # current round counter
        self.nodes = allnode        # addresses of all participants, including self
        # values seen so far, tagged "new" (not yet broadcast) or "old";
        # the -1 sentinel stays permanently "new" so every round sends something
        self.values = {value : "new", -1 : "new"}
        self.wait = 0
        self.last = 0               # number of distinct senders heard last round

    def get_same(self):
        # Main loop: broadcast, collect, and decide. Runs until either the
        # configured crash round is reached or a round is "quiet".
        while True:
            if(self.crash == self.round):
                # simulated crash: send to a random subset, then stop.
                # NOTE(review): tabu is passed to send_news but send_news
                # never reads it, so the "partial send" is not actually
                # partial -- confirm intended behavior
                num = random.randint(1, len(self.nodes))
                tabu = []
                for i in range(num):
                    tabu.append(self.nodes[random.randint(0, len(self.nodes) - 1)])
                self.send_news(tabu)
                print("%d Crash!" % self.index)
                break
            self.send_news()
            ll = self.recv_news()
            if(self.last == ll):
                # no change in how many peers we heard from: decide on the
                # minimum value seen (min shadows the builtin here)
                min = -1
                for key in self.values:
                    if(key < min or min == -1):
                        min = key
                print("%s last Value: %d, round %d" % (self.index, min, self.round))
                break
            self.last = ll
            self.round = self.round + 1
            time.sleep(1)

    def send_news(self, tabu = []):
        # Broadcast every value still tagged "new" to all peers, then mark
        # them "old" (except the -1 sentinel, which is re-sent every round).
        # NOTE(review): mutable default argument; harmless only because tabu
        # is never read or mutated in this body.
        msg = []
        for key in self.values:
            if(self.values[key] == "new"):
                msg.append(key)
                if(key != -1):
                    self.values[key] = "old"
        if(len(msg) > 0):
            sid = 0
            for node in self.nodes:
                sid = sid + 1
                # NOTE(review): sid is 1-based while self.index is 0-based,
                # so the second condition compares different numbering
                # schemes -- confirm which peer is meant to be skipped
                if (self.nodes[self.index] != node and sid != self.index):
                    sendMsg = (self.index, self.round, msg)
                    self.skt.sendto(pickle.dumps(sendMsg), node)
                    print("%s send %s to %s" % (self.index,msg, sid))

    def recv_news(self):
        # Collect messages for this round until the socket times out.
        # Returns the number of distinct senders whose round matched ours.
        recvd = {}
        while True:
            try:
                data, addr = self.skt.recvfrom(4396)
            except socket.timeout:
                # no more traffic this round
                print("%d timeout" % self.index)
                break
            # SECURITY: pickle.loads on network data is unsafe outside a
            # trusted localhost experiment like this one
            (src, r, e) = pickle.loads(data)
            print("%d recv %s" % (self.index, e))
            if(r != self.round):
                print("%d not Synchronize!\n" % src)
            if(r == self.round):
                recvd[src] = e
                # merge any values we haven't seen; they'll be gossiped next round
                for i in e:
                    if(not (i in self.values)):
                        self.values[i] = "new"
        return len(recvd)
# Addresses for the six participants; each Node binds one of these UDP ports.
ipPort = ["127.0.0.1:5566", "127.0.0.1:5567", "127.0.0.1:5568", "127.0.0.1:5569", "127.0.0.1:5570", "127.0.0.1:5571"]
nodes = list(map(toAddr,ipPort))
# Node(index, initial_value, crash_round, all_addresses):
# nodes 1-3 are set to crash in rounds 1-3; -1 means the node never crashes.
node1 = Node(0, 1, 1, nodes)
node2 = Node(1, 2, 2, nodes)
node3 = Node(2, 3, 3, nodes)
node4 = Node(3, 4, -1, nodes)
node5 = Node(4, 5, -1, nodes)
node6 = Node(5, 6, -1, nodes)
print("begin")
# Run every node's consensus loop concurrently, one thread per node.
# NOTE(review): no __main__ guard, so importing this module starts the
# experiment as a side effect.
Thread(target=node1.get_same).start()
Thread(target=node2.get_same).start()
Thread(target=node3.get_same).start()
Thread(target=node4.get_same).start()
Thread(target=node5.get_same).start()
Thread(target=node6.get_same).start()
|
lightserver.py | import os
from signal import signal, SIGPIPE, SIG_DFL, SIGTERM
from flask import Flask, render_template
from flask_bootstrap import Bootstrap
from flask_wtf import Form
from flask_codemirror.fields import CodeMirrorField
from wtforms.fields import SubmitField
from flask_codemirror import CodeMirror
from flask import request
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
# Restore default SIGPIPE handling so broken pipes kill the child instead
# of raising in Python.
signal(SIGPIPE, SIG_DFL)

# Module-level state shared between request handlers and the spawn thread:
# result holds the Popen handle of the running user process (or None),
# output holds its captured stdout once it finishes (or None).
result = None
output = None

# Flask / Flask-CodeMirror configuration, picked up via from_object(__name__).
CODEMIRROR_LANGUAGES = ['python', 'html']
SECRET_KEY = 'secret!'
CODEMIRROR_THEME = '3024-night'
CODEMIRROR_ADDONS = (
    ('display', 'placeholder'),
)

app = Flask(__name__)
app.config.from_object(__name__)
codemirror = CodeMirror(app)
Bootstrap(app)
@app.route('/')
def test():
    # Serve the editor UI (templates/v2.html).
    return render_template('v2.html')
@app.route("/kill")
def kill():
    """Terminate the currently running user process group, if any.

    spawn() starts the child with preexec_fn=os.setsid, so it leads its own
    process group; killing the group takes down the shell wrapper and the
    user's python process together. Returns a short status string.
    """
    global result
    print("Starting kill")
    # NOTE(review): `result` is shared with the spawn() worker thread without
    # a lock, so a concurrent /run can race with this check.
    if result is not None:
        print("Killing")
        os.killpg(os.getpgid(result.pid), SIGTERM)
        result = None
        return "Killed it..."
    else:
        return "Nothing to kill"
def spawn(code, userInput):
    """Write the submitted code and stdin to disk, run the code, capture stdout.

    Runs on a worker thread. Publishes the Popen handle via the module-global
    `result` (so /kill can signal its process group) and the captured stdout
    via the module-global `output` (read by /output).
    """
    global result
    global output
    # with-blocks guarantee the files are flushed and closed before the
    # child process reads them, even if a write raises
    with open('userCode/code.py', 'w') as f:
        f.write(code)
    with open('userCode/input.txt', 'w') as f:
        f.write(userInput)
    # SECURITY: this intentionally executes arbitrary user-supplied code on
    # the host; only run this server inside a sandboxed environment.
    # os.setsid makes the child a process-group leader so /kill can kill the
    # whole group.
    result = Popen("python userCode/code.py < userCode/input.txt", stdout=PIPE,
                   stdin=PIPE, stderr=PIPE, shell=True, preexec_fn=os.setsid)
    output = result.communicate()[0]
@app.route("/run", methods=['POST'])
def runcode():
    """Kick off execution of the submitted code on a background thread."""
    submitted_code = request.form['code']
    stdin_text = request.form['input']
    worker = Thread(target=spawn, args=(submitted_code, stdin_text,))
    worker.start()
    return "Running..."
@app.route("/output")
def getOutput():
    """Return the captured program output once, then clear it.

    Subsequent calls return the "No output..." placeholder until spawn()
    publishes a new result.
    """
    global output
    if output is not None:
        # strings are immutable, so no defensive copy is needed before
        # clearing the global (the old `(output + '.')[:-1]` copy was a no-op)
        temp = output
        output = None
        return temp
    else:
        return "No output..."
if __name__ == '__main__':
    # Development server only; use a production WSGI server for real deployments.
    app.run(debug=True)
|
serve.py | # Copyright 2022 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import asyncio
import inspect
import json
import yaml
import os
import re
import signal
import sys
import threading
import time
import uuid
from concurrent.futures import ThreadPoolExecutor
from typing import Any, Dict
from asgiref.sync import async_to_sync
from fastapi import FastAPI
from fastapi.exceptions import RequestValidationError
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, PlainTextResponse, Response
from cortex_internal.lib import util
from cortex_internal.lib.api import DynamicBatcher, RealtimeAPI
from cortex_internal.lib.concurrency import FileLock
from cortex_internal.lib.exceptions import UserException, UserRuntimeException
from cortex_internal.lib.log import configure_logger
from cortex_internal.lib.telemetry import capture_exception, get_default_tags, init_sentry
# Error reporting and logging must be up before anything else runs.
init_sentry(tags=get_default_tags())
logger = configure_logger("cortex", os.environ["CORTEX_LOG_CONFIG_FILE"])

# One shared pool sized from the environment; it is also installed as the
# event loop's default executor so sync work offloads to the same threads.
request_thread_pool = ThreadPoolExecutor(max_workers=int(os.environ["CORTEX_THREADS_PER_PROCESS"]))
# NOTE(review): asyncio.get_event_loop() at import time is deprecated in
# newer Python versions -- confirm the runtime pins a version where this
# returns the loop uvicorn/the server will actually use.
loop = asyncio.get_event_loop()
loop.set_default_executor(request_thread_pool)

app = FastAPI()

# Per-process cache populated by start_fn(); handlers read from it on every
# request ("handle_fn_args" is added later during initialization).
local_cache: Dict[str, Any] = {
    "api": None,
    "handler_impl": None,
    "dynamic_batcher": None,
    "api_route": None,
    "client": None,
}
@app.on_event("startup")
def startup():
    """Signal process readiness by touching a per-PID marker file."""
    with open(f"/run/workspace/proc-{os.getpid()}-ready.txt", "a"):
        pass
@app.on_event("shutdown")
def shutdown():
    """Remove the API and per-process readiness markers; missing files are fine."""
    marker_paths = (
        "/run/workspace/api_readiness.txt",
        f"/run/workspace/proc-{os.getpid()}-ready.txt",
    )
    for marker in marker_paths:
        try:
            os.remove(marker)
        except FileNotFoundError:
            # already gone -- nothing to clean up
            pass
def is_allowed_request(request):
    """True when the request targets the configured API route with a handled verb."""
    # keep the short-circuit: only consult handle_fn_args for matching routes
    if request.url.path != local_cache["api_route"]:
        return False
    return request.method.lower() in local_cache["handle_fn_args"]
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request, e):
    """Render HTTP exceptions as bare text with their original status code."""
    return Response(content=str(e.detail), status_code=e.status_code)
@app.exception_handler(RequestValidationError)
async def validation_exception_handler(request, e):
    """Report request-validation failures to the caller as HTTP 400."""
    return Response(content=str(e), status_code=400)
@app.exception_handler(Exception)
async def uncaught_exception_handler(request, e):
    """Last-resort handler: hide internal details behind a generic 500."""
    return Response(content="internal server error", status_code=500)
@app.middleware("http")
async def parse_payload(request: Request, call_next):
    # Pre-parse the request body into request.state.payload based on the
    # Content-Type header, so handlers receive a ready-to-use payload.
    # Requests outside the API route, or whose handler doesn't declare a
    # "payload" parameter, pass through untouched.
    if not is_allowed_request(request):
        return await call_next(request)

    verb = request.method.lower()
    if (
        verb in local_cache["handle_fn_args"]
        and "payload" not in local_cache["handle_fn_args"][verb]
    ):
        # the handler doesn't want a payload; skip body parsing entirely
        return await call_next(request)

    content_type = request.headers.get("content-type", "").lower()

    if content_type.startswith("text/plain"):
        try:
            # decode using the charset parameter if given, defaulting to utf-8;
            # the last charset= match wins, with any trailing ';' stripped
            charset = "utf-8"
            matches = re.findall(r"charset=(\S+)", content_type)
            if len(matches) > 0:
                charset = matches[-1].rstrip(";")
            body = await request.body()
            request.state.payload = body.decode(charset)
        except Exception as e:
            return PlainTextResponse(content=str(e), status_code=400)
    elif content_type.startswith("multipart/form") or content_type.startswith(
        "application/x-www-form-urlencoded"
    ):
        try:
            request.state.payload = await request.form()
        except Exception as e:
            return PlainTextResponse(content=str(e), status_code=400)
    elif content_type.startswith("application/json"):
        try:
            request.state.payload = await request.json()
        except json.JSONDecodeError as e:
            return JSONResponse(content={"error": str(e)}, status_code=400)
    else:
        # unknown content type: hand the raw bytes to the handler
        request.state.payload = await request.body()

    return await call_next(request)
def handle(request: Request):
    # Dispatch a request to the user handler's handle_<verb> method (or the
    # dynamic batcher for POSTs) and convert the returned value to a
    # starlette Response. Handlers may return a (result, callback) tuple;
    # the callback is submitted to the thread pool after the response is built.
    if async_to_sync(request.is_disconnected)():
        # client went away; 499 mirrors nginx's "client closed request"
        return Response(status_code=499, content="disconnected client")

    verb = request.method.lower()
    handle_fn_args = local_cache["handle_fn_args"]
    if verb not in handle_fn_args:
        return Response(status_code=405, content="method not implemented")

    handler_impl = local_cache["handler_impl"]
    dynamic_batcher = None
    if verb == "post":
        # batching (when configured) only applies to POST requests
        dynamic_batcher: DynamicBatcher = local_cache["dynamic_batcher"]
    kwargs = build_handler_kwargs(request)

    if dynamic_batcher:
        result = dynamic_batcher.process(**kwargs)
    else:
        result = getattr(handler_impl, f"handle_{verb}")(**kwargs)

    callback = None
    if isinstance(result, tuple) and len(result) == 2 and callable(result[1]):
        # (payload, callback) protocol: unwrap the payload, defer the callback
        callback = result[1]
        result = result[0]

    if isinstance(result, bytes):
        response = Response(content=result, media_type="application/octet-stream")
    elif isinstance(result, str):
        response = Response(content=result, media_type="text/plain")
    elif isinstance(result, Response):
        response = result
    else:
        # anything else must be JSON-serializable
        try:
            json_string = json.dumps(result)
        except Exception as e:
            raise UserRuntimeException(
                str(e),
                "please return an object that is JSON serializable (including its nested fields), a bytes object, "
                "a string, or a `starlette.response.Response` object",
            ) from e
        response = Response(content=json_string, media_type="application/json")

    if callback is not None:
        # fire-and-forget: the response is not delayed by the callback
        request_thread_pool.submit(callback)

    return response
def build_handler_kwargs(request: Request):
    """Build the kwargs dict for handle_<verb>, passing only the arguments
    the user's handler signature actually declares."""
    accepted = local_cache["handle_fn_args"][request.method.lower()]
    kwargs = {}
    if "payload" in accepted:
        kwargs["payload"] = request.state.payload
    if "headers" in accepted:
        kwargs["headers"] = request.headers
    if "query_params" in accepted:
        kwargs["query_params"] = request.query_params
    return kwargs
def get_summary():
    """Return API summary info: the client's model metadata when available,
    otherwise an empty dict."""
    summary = {}
    client = local_cache["client"]
    if hasattr(client, "metadata"):
        summary = {"model_metadata": client.metadata}
    return summary
# this exists so that the user's __init__() can be executed by the request thread pool, which helps
# to avoid errors that occur when the user's __init__() function must be called by the same thread
# which executes handle_<verb>() methods. This only avoids errors if threads_per_worker == 1
def start():
    """Run start_fn on the request thread pool and block until it finishes."""
    return request_thread_pool.submit(start_fn).result()
def start_fn():
    """Initialize the API: load config, build client + handler, register routes.

    Calls sys.exit(1) if any part of startup fails, after logging (and, for
    non-user errors, capturing) the exception.
    """
    # Environment contract provided by the surrounding runtime.
    project_dir = os.environ["CORTEX_PROJECT_DIR"]
    model_server_config_path = os.environ["CORTEX_MODEL_SERVER_CONFIG"]
    model_dir = os.getenv("CORTEX_MODEL_DIR")
    tf_serving_port = os.getenv("CORTEX_TF_BASE_SERVING_PORT", "9000")
    tf_serving_host = os.getenv("CORTEX_TF_SERVING_HOST", "localhost")
    try:
        with open(model_server_config_path) as yaml_file:
            model_server_config = yaml.safe_load(yaml_file)
        api = RealtimeAPI(model_server_config, model_dir)
        client = api.initialize_client(
            tf_serving_host=tf_serving_host, tf_serving_port=tf_serving_port
        )
        # use the filelock to load one handler at a time (if multiple processes are run)
        with FileLock("/run/init_stagger.lock"):
            logger.info("loading the handler from {}".format(api.path))
            handler_impl = api.initialize_impl(project_dir=project_dir, client=client)

        # crons only stop if an unhandled exception occurs; if any cron thread
        # has died, take down the whole worker with SIGQUIT so it gets restarted
        def check_if_crons_have_failed():
            while True:
                for cron in api.crons:
                    if not cron.is_alive():
                        os.kill(os.getpid(), signal.SIGQUIT)
                time.sleep(1)

        threading.Thread(target=check_if_crons_have_failed, daemon=True).start()

        local_cache["api"] = api
        local_cache["client"] = client
        local_cache["handler_impl"] = handler_impl

        # Record the declared argument names of every implemented handle_<verb>
        # so build_handler_kwargs() only passes what the user asked for.
        local_cache["handle_fn_args"] = {}
        for verb in ["post", "get", "put", "patch", "delete"]:
            if util.has_method(handler_impl, f"handle_{verb}"):
                local_cache["handle_fn_args"][verb] = inspect.getfullargspec(
                    getattr(handler_impl, f"handle_{verb}")
                ).args
        if len(local_cache["handle_fn_args"]) == 0:
            raise UserException(
                "no user-defined `handle_<verb>` method found in handler class; define at least one verb handler (`handle_post`, `handle_get`, `handle_put`, `handle_patch`, `handle_delete`)"
            )

        # Server-side (dynamic) batching only applies to handle_post.
        if api.python_server_side_batching_enabled:
            dynamic_batching_config = api._model_server_config["server_side_batching"]
            if "post" in local_cache["handle_fn_args"]:
                local_cache["dynamic_batcher"] = DynamicBatcher(
                    handler_impl,
                    method_name=f"handle_post",
                    max_batch_size=dynamic_batching_config["max_batch_size"],
                    batch_interval_seconds=dynamic_batching_config["batch_interval_seconds"],
                )
            else:
                raise UserException(
                    "dynamic batcher has been enabled, but no `handle_post` method could be found in the `Handler` class"
                )

        local_cache["api_route"] = "/"
        local_cache["info_route"] = "/info"

    except Exception as err:
        if not isinstance(err, UserRuntimeException):
            capture_exception(err)
        logger.exception("failed to start api")
        sys.exit(1)

    # Register routes only after startup fully succeeded.
    app.add_api_route(
        local_cache["api_route"],
        handle,
        methods=[verb.upper() for verb in local_cache["handle_fn_args"]],
    )
    app.add_api_route(local_cache["info_route"], get_summary, methods=["GET"])
return app
|
03-tank-custom.py | #!/usr/bin/env python3
import logging
import threading
import signal
import time
import ev3dev.ev3 as ev3
import sys
from ev3dev.auto import OUTPUT_A, OUTPUT_B, OUTPUT_C, OUTPUT_D
from ev3dev.helper import MediumMotor
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)5s: %(message)s')
log = logging.getLogger(__name__)
log.info("Starting TRACK3RWithClaw")
class Tank(object):
    """A pair of EV3 large motors driven as a differential (tank) drive.

    Exits the process if either motor is not connected.
    """

    def __init__(self, left_motor, right_motor, polarity='normal', name='Tank',
                 speed_sp=400):
        self.left_motor = ev3.LargeMotor(left_motor)
        self.right_motor = ev3.LargeMotor(right_motor)
        # Verify both motors are present before touching either of them.
        for motor in (self.left_motor, self.right_motor):
            if not motor.connected:
                log.error("%s is not connected" % motor)
                sys.exit(1)
        self.left_motor.reset()
        self.right_motor.reset()
        self.speed_sp = speed_sp
        for motor in (self.left_motor, self.right_motor):
            motor.speed_sp = self.speed_sp
        self.set_polarity(polarity)
        self.name = name
        log.info("Created Tank object "+name+" for speed "+str(self.speed_sp))

    def __str__(self):
        return self.name

    def set_polarity(self, polarity):
        """Set both motors' polarity; must be 'normal' or 'inversed'."""
        valid_choices = ('normal', 'inversed')
        assert polarity in valid_choices,\
            "%s is an invalid polarity choice, must be %s" % (polarity, ', '.join(valid_choices))
        for motor in (self.left_motor, self.right_motor):
            motor.polarity = polarity
class RemoteControlledTank(Tank):
    """Tank steered by the EV3 IR remote: red buttons drive the left motor,
    blue buttons drive the right motor.

    Exits the process if the remote control is not connected.
    """

    def __init__(self, left_motor, right_motor, polarity='normal', channel=1, speed_sp=400):
        Tank.__init__(self, left_motor, right_motor, polarity, speed_sp=speed_sp)
        log.info("Getting remote control for channel "+str(channel))
        self.remote = ev3.RemoteControl(channel=channel)
        if not self.remote.connected:
            log.error("%s is not connected" % self.remote)
            sys.exit(1)
        self.remote.on_red_up = self.make_move(self.left_motor, self.speed_sp)
        self.remote.on_red_down = self.make_move(self.left_motor, self.speed_sp * -1)
        self.remote.on_blue_up = self.make_move(self.right_motor, self.speed_sp)
        self.remote.on_blue_down = self.make_move(self.right_motor, self.speed_sp * -1)

    def make_move(self, motor, dc_sp):
        """Return a button callback that runs `motor` at `dc_sp` while pressed
        and stops it on release."""
        def move(state):
            if state:
                motor.run_forever(speed_sp=dc_sp)
            else:
                motor.stop()
        return move

    def main(self, done):
        """Process remote-control events until `done` is set or an error occurs."""
        try:
            while not done.is_set():
                self.remote.process()
                time.sleep(0.01)
        except (KeyboardInterrupt, Exception) as e:
            log.exception(e)
        finally:
            # Bug fix: the motor shutdown used to live inside the except
            # handler, so a clean exit (done.is_set()) left motors running.
            # Stop all motors on every exit path, as the original comment
            # ("Exit cleanly so that all motors are stopped") intended.
            for motor in ev3.list_motors():
                motor.stop()
class TRACK3R(RemoteControlledTank):
    """
    Common base for all TRACK3R variants; subclasses differ only in what they
    do with the medium motor. Toggle the beacon button on the EV3 remote to
    drive the medium motor.
    """

    def __init__(self, medium_motor, left_motor, right_motor, speed_sp=400, channel=1):
        super().__init__(left_motor, right_motor, speed_sp=speed_sp, channel=channel)
        self.medium_motor = MediumMotor(medium_motor)
        if not self.medium_motor.connected:
            log.error("%s is not connected" % self.medium_motor)
            sys.exit(1)
        self.medium_motor.reset()
class TRACK3RWithClaw(TRACK3R):
    """TRACK3R variant whose medium motor opens and closes a claw, toggled by
    the beacon button on the remote."""

    def __init__(self, medium_motor=OUTPUT_A, left_motor=OUTPUT_B, right_motor=OUTPUT_C, speed_sp=400, channel=1):
        super().__init__(medium_motor, left_motor, right_motor, speed_sp=speed_sp, channel=channel)
        self.remote.on_beacon = self.move_claw

    def move_claw(self, state):
        # Beacon pressed -> close (-75 degrees); released -> open (+75 degrees).
        position = -75 if state else 75
        self.medium_motor.run_to_rel_pos(speed_sp=200, position_sp=position)
def touch_leds(done):
    """
    This is the second thread of execution. It polls the touch sensor and
    mirrors its state on the left LED: green when released, red when pressed.
    """
    ts = ev3.TouchSensor()
    while not done.is_set():
        color = ev3.Leds.RED if ts.value() else ev3.Leds.GREEN
        ev3.Leds.set_color(ev3.Leds.LEFT, color)
def color_speaker(done):
    """
    Poll the color sensor twice a second and speak the color's name whenever
    it changes to a recognized (non-zero) value.
    """
    cl = ev3.ColorSensor()
    assert cl.connected, "Connect a color sensor to any sensor port"
    cl.mode = 'COL-COLOR'
    # Index 0 means "nothing recognized"; indices 1-7 are real colors.
    colors = ('unknown', 'black', 'blue', 'green', 'yellow', 'red', 'white', 'brown')
    previous = 0
    while not done.is_set():
        current = cl.value()
        if current != previous:
            previous = current
            if current:
                print(colors[current])
                ev3.Sound.speak("This is "+colors[current]+".").wait()
        time.sleep(0.5)
# The 'done' event will be used to signal the threads to stop:
done = threading.Event()

# We also need to catch SIGINT (keyboard interrupt) and SIGTERM (termination
# signal from brickman) and exit gracefully:
def signal_handler(signal, frame):
    done.set()

signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

# NOTE(review): both tanks use the same default motor outputs (A/B/C) but
# listen on different remote channels -- confirm this sharing is intentional.
trackerBasic = TRACK3RWithClaw()
trackerFast = TRACK3RWithClaw(channel=2, speed_sp=800)
#trackerSlow = TRACK3RWithClaw(channel=3, speed_sp=200)

# Now that we have the worker functions defined, lets run those in separate
# threads.
#touchthread = threading.Thread(target=touch_leds, args=(done,))
colorthread = threading.Thread(target=color_speaker, args=(done,))
fastthread = threading.Thread(target=trackerFast.main, args=(done,))
#touchthread.start()
colorthread.start()
fastthread.start()

log.info("Started TRACK3RWithClaw")
ev3.Sound.speak("I'm ready!")

# The basic tracker runs on the main thread; returning from main() means a
# shutdown was requested (done set, Ctrl-C, or an error).
trackerBasic.main(done)
# hopefully it will be sufficient to start one
ev3.Sound.speak("Exiting!")
log.info("Exiting TRACK3RWithClaw")
done.set()
#touchthread.join()
colorthread.join()
fastthread.join()
|
test_task_manager.py | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import concurrent.futures
import fixtures
import mock
import queue
import threading
from openstack import task_manager
from openstack.tests.unit import base
class TestException(Exception):
    """Exception raised by TaskTest to exercise re-raise behavior."""
class TaskTest(task_manager.Task):
    """Task whose main() always raises, for exception re-raise tests."""

    def main(self):
        raise TestException("This is a test exception")
class TaskTestGenerator(task_manager.Task):
    """Task whose main() returns a generator yielding a single value."""

    def main(self):
        yield 1
class TaskTestInt(task_manager.Task):
    """Task whose main() returns a plain int."""

    def main(self):
        return 1
class TaskTestFloat(task_manager.Task):
    """Task whose main() returns a plain float."""

    def main(self):
        return 2.0
class TaskTestStr(task_manager.Task):
    """Task whose main() returns a plain string."""

    def main(self):
        return "test"
class TaskTestBool(task_manager.Task):
    """Task whose main() returns a plain bool."""

    def main(self):
        return True
class TaskTestSet(task_manager.Task):
    """Task whose main() returns a plain set."""

    def main(self):
        return {1, 2}
class TestTaskManager(base.TestCase):
    """Tests for TaskManager: exception re-raising, result type passthrough,
    and asynchronous submission."""

    def setUp(self):
        super().setUp()
        self.manager = task_manager.TaskManager(name='test')

    def test_wait_re_raise(self):
        """An exception raised inside a Task must be re-raised by submit_task.

        Exercises the re-raise path in Task::wait() so the behavior is the
        same across all supported interpreters.
        """
        self.assertRaises(TestException, self.manager.submit_task, TaskTest())

    def test_dont_munchify_int(self):
        self.assertIsInstance(self.manager.submit_task(TaskTestInt()), int)

    def test_dont_munchify_float(self):
        self.assertIsInstance(self.manager.submit_task(TaskTestFloat()), float)

    def test_dont_munchify_str(self):
        self.assertIsInstance(self.manager.submit_task(TaskTestStr()), str)

    def test_dont_munchify_bool(self):
        self.assertIsInstance(self.manager.submit_task(TaskTestBool()), bool)

    def test_dont_munchify_set(self):
        self.assertIsInstance(self.manager.submit_task(TaskTestSet()), set)

    @mock.patch.object(concurrent.futures.ThreadPoolExecutor, 'submit')
    def test_async(self, mock_submit):
        # run_async=True should hand the work to the thread pool executor.
        self.manager.submit_function(set, run_async=True)
        self.assertTrue(mock_submit.called)
class ThreadingTaskManager(task_manager.TaskManager):
    """A TaskManager subclass that runs tasks on a dedicated worker thread,
    exercising the thread-shifting exception handling behavior."""

    def __init__(self, *args, **kw):
        super().__init__(*args, **kw)
        self.queue = queue.Queue()
        self._running = True
        self._thread = threading.Thread(name=self.name, target=self.run)
        self._thread.daemon = True
        # Set to True only if run() ever hits its exception handler.
        self.failed = False

    def start(self):
        self._thread.start()

    def stop(self):
        self._running = False
        # Push a sentinel so the worker wakes up and notices the stop flag.
        self.queue.put(None)

    def join(self):
        self._thread.join()

    def run(self):
        # No exception should ever cause this method to hit its
        # exception handler.
        try:
            while True:
                item = self.queue.get()
                if not item:
                    if self._running:
                        continue
                    break
                self.run_task(item)
                self.queue.task_done()
        except Exception:
            self.failed = True
            raise

    def submit_task(self, task, raw=False):
        # An important part of the exception-shifting feature is that
        # this method should raise the exception.
        self.queue.put(task)
        return task.wait()
class ThreadingTaskManagerFixture(fixtures.Fixture):
    """Fixture that starts a ThreadingTaskManager and tears it down cleanly."""

    def _setUp(self):
        self.manager = ThreadingTaskManager(name='threading test')
        self.manager.start()
        self.addCleanup(self._cleanup)

    def _cleanup(self):
        # Stop the worker and wait for it to exit before the test ends.
        self.manager.stop()
        self.manager.join()
class TestThreadingTaskManager(base.TestCase):
    """Tests that exceptions cross the worker-thread boundary correctly."""

    def setUp(self):
        super().setUp()
        fixture = self.useFixture(ThreadingTaskManagerFixture())
        self.manager = fixture.manager

    def test_wait_re_raise(self):
        """A Task exception must surface in submit_task, not in the worker.

        Exercises the re-raise path in Task::wait() so the behavior is the
        same across all supported interpreters.
        """
        self.assertRaises(TestException, self.manager.submit_task, TaskTest())
        # Stop the manager and join the run thread to ensure the
        # exception handler has run.
        self.manager.stop()
        self.manager.join()
        self.assertFalse(self.manager.failed)
|
car.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ##
# @brief [py demo] car demo [robotx6 sync test]
# @author Kab Kyoum Kim (kabkyoum.kim@doosan.com)
import rospy
import os
import threading, time
import sys
sys.dont_write_bytecode = True
sys.path.append( os.path.abspath(os.path.join(os.path.dirname(__file__),"../../../../common/imp")) ) # get import path : DSR_ROBOT.py
# for single robot
#import DR_init
#DR_init.__dsr__id = "dsr01"
#DR_init.__dsr__model = "m1013"
#from DSR_ROBOT import *
# for mulit robot
########from DSR_ROBOT_MULTI import *
from DSR_ROBOT import *
NUM_ROBOT = 4  # number of robot threads kept in lock-step by CRobotSync
############################################################################################
class CRobotSync:
    """Barrier-style synchronizer that keeps multiple robot threads in lock-step.

    Each robot thread calls Wait(r) before a motion; a coordinator calls
    WakeUp(r) or WakeUpAll() to release them. One threading.Lock per robot is
    used as the blocking primitive, with bIsWait[r] flagging who is parked.
    """

    def __init__(self, r):
        self.description = "Sync for Multiple Robos"
        self.author = "Doosan Robotics"
        self.nRobot = r
        self.nIsRun = True      # cleared by CleanUp() to stop the wake-up loops
        self.nWaitBit = 0       # bitmask with one bit set per robot
        self.nCurBit = 0        # bits of robots observed waiting (WakeUpAll)
        self.bIsWait = list()
        self.lock = list()
        for i in range(r):
            self.lock.append(threading.Lock())
            self.bIsWait.append(False)
            self.nWaitBit |= 0x1 << i

    def CleanUp(self):
        """Stop the synchronizer; pending WakeUp loops will fall through."""
        if True == self.nIsRun:
            self.nIsRun = False
            print("~CleanUp()")

    def Wait(self, r):
        """Park robot `r` until its lock is released by WakeUp/WakeUpAll.

        NOTE(review): each lock starts unlocked, so the very first Wait()
        per robot returns immediately -- confirm this startup behavior is
        intended.
        """
        self.bIsWait[r] = True
        self.lock[r].acquire()
        self.bIsWait[r] = False
        return 0

    def WakeUp(self, r):
        """Release robot `r` once it is observed waiting."""
        while self.nIsRun:
            if self.bIsWait[r]:
                self.lock[r].release()
                break
            time.sleep(0.01)
        return 0

    def WakeUpAll(self):
        """Wait until every robot is parked, then release all of them."""
        self.nCurBit = 0
        while self.nIsRun:
            for i in range(self.nRobot):
                if self.bIsWait[i]:
                    self.nCurBit |= 0x1 << i
            if self.nWaitBit == self.nCurBit:
                break
            # Fix: the original loop had no delay (unlike WakeUp), so it
            # busy-spun at 100% CPU while waiting for stragglers.
            time.sleep(0.01)
        for i in range(self.nRobot):
            self.lock[i].release()
        return 0
# Global synchronizer shared by all robot worker threads below.
RobotSync = CRobotSync(NUM_ROBOT)
#######################################################################################
#----------------------------------------------------------------------
# Joint-space poses (degrees per joint) and task-space poses used by the demo.
J0 = posj(0, 0, 0, 0, 0, 0)
J1 = posj(0, 0, 0, 30, 30, 0)  # NOTE(review): J1 is reassigned to a list below
JReady = [0, -20, 110, 0, 60, 0]
TCP_POS = [0, 0, 0, 0, 0, 0]
J00 = [-180, 0, -145, 0, -35, 0]
J01r = [-180.0, 71.4, -145.0, 0.0, -9.7, 0.0]
J02r = [-180.0, 67.7, -144.0, 0.0, 76.3, 0.0]
J03r = [-180.0, 0.0, 0.0, 0.0, 0.0, 0.0]
J04r = [-90.0, 0.0, 0.0, 0.0, 0.0, 0.0]
J04r1 = [-90.0, 30.0, -60.0, 0.0, 30.0, -0.0]
J04r2 = [-90.0, -45.0, 90.0, 0.0, -45.0, -0.0]
J04r3 = [-90.0, 60.0, -120.0, 0.0, 60.0, -0.0]
J04r4 = [-90.0, 0.0, -0.0, 0.0, 0.0, -0.0]
J05r = [-144.0, -4.0, -84.8, -90.9, 54.0, -1.1]
J07r = [-152.4, 12.4, -78.6, 18.7, -68.3, -37.7]
J08r = [-90.0, 30.0, -120.0, -90.0, -90.0, 0.0]
JEnd = [0.0, -12.6, 101.1, 0.0, 91.5, -0.0]
# Relative tool-frame moves (mm): straight out and back along Z.
dREL1 = [0, 0, 350, 0, 0, 0]
dREL2 = [0, 0, -350, 0, 0, 0]
# Velocity/acceleration pairs for task-space moves (zeros: timing set via t=).
velx = [0, 0]
accx = [0, 0]
vel_spi = [400, 400]
acc_spi = [150, 150]
J1 = [81.2, 20.8, 127.8, 162.5, 56.1, -37.1]  # overrides the posj J1 above
X0 = [-88.7, 799.0, 182.3, 95.7, 93.7, 133.9]
X1 = [304.2, 871.8, 141.5, 99.5, 84.9, 133.4]
X2 = [437.1, 876.9, 362.1, 99.6, 84.0, 132.1]
X3 = [-57.9, 782.4, 478.4, 99.6, 84.0, 132.1]
# Periodic-motion amplitude (per axis) and period (seconds).
amp = [0, 0, 0, 30, 30, 0]
period = [0, 0, 0, 3, 6, 0]
x01 = [423.6, 334.5, 651.2, 84.7, -180.0, 84.7]
x02 = [423.6, 34.5, 951.2, 68.2, -180.0, 68.2]
x03 = [423.6, -265.5, 651.2, 76.1, -180.0, 76.1]
x04 = [423.6, 34.5, 351.2, 81.3, -180.0, 81.3]
x0204c = [x02, x04]
def thread_robot1(robot_id, robot_model):
    """Worker for robot 1: bounce between J0 and J1 until ROS shutdown.

    On any exception, stops the shared synchronizer and logs the error.
    """
    try:
        #nRobotID = 0
        r = CDsrRobot(robot_id, robot_model)
        while not rospy.is_shutdown():
            #RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            #RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        # Dead code: full choreography kept for reference as an unused string.
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            movel(X3, velx, accx, t=2.5)
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            movej(JEnd, v=60, a=60, t=4)
            RobotSync.Wait(nRobotID)
            move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
        '''
    except Exception as err:
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def thread_robot2(robot_id, robot_model):
    """Worker for robot 2: full demo choreography, barrier-synced as slot 0.

    Each motion is preceded by RobotSync.Wait(nRobotID) so all active robots
    advance through the sequence in lock-step. On any exception, stops the
    shared synchronizer and logs the error.
    """
    try:
        nRobotID = 0
        r = CDsrRobot(robot_id, robot_model)
        # Dead code: simpler two-pose loop kept for reference as an unused string.
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            r.movel(X3, velx, accx, t=2.5)
            # Shuttle through the X poses (range(0, 1) runs exactly once).
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            r.movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            r.movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(JEnd, v=60, a=60, t=4)
            RobotSync.Wait(nRobotID)
            r.move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
    except Exception as err:
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def thread_robot3(robot_id, robot_model):
    """Worker for robot 3: bounce between J0 and J1 until ROS shutdown.

    On any exception, stops the shared synchronizer and logs the error.
    """
    try:
        #nRobotID = 2
        r = CDsrRobot(robot_id, robot_model)
        while not rospy.is_shutdown():
            # RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            # RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        # Dead code: full choreography kept for reference as an unused string.
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            movel(X3, velx, accx, t=2.5)
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            movej(JEnd, v=60, a=60, t=4)
            RobotSync.Wait(nRobotID)
            move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
        '''
    except Exception as err:
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def thread_robot4(robot_id, robot_model):
    """Worker for robot 4: full demo choreography, barrier-synced as slot 1.

    On any exception, stops the shared synchronizer and logs the error.
    """
    try:
        nRobotID = 1
        r = CDsrRobot(robot_id, robot_model)
        # Dead code: simpler two-pose loop kept for reference as an unused string.
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            r.movel(X3, velx, accx, t=2.5)
            # Shuttle through the X poses (range(0, 1) runs exactly once).
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            r.movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            r.movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(JEnd, v=60, a=60, t=4)
            RobotSync.Wait(nRobotID)
            r.move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
    except Exception as err:
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def thread_robot5(robot_id, robot_model):
    """Worker for robot 5: full demo choreography, barrier-synced as slot 2.

    On any exception, stops the shared synchronizer and logs the error.
    """
    try:
        nRobotID = 2
        r = CDsrRobot(robot_id, robot_model)
        # Dead code: simpler two-pose loop kept for reference as an unused string.
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            r.movel(X3, velx, accx, t=2.5)
            # Shuttle through the X poses (range(0, 1) runs exactly once).
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            r.movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            r.movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(JEnd, v=60, a=60, t=4)
            RobotSync.Wait(nRobotID)
            r.move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
    except Exception as err:
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def thread_robot6(robot_id, robot_model):
    """Worker loop for robot #6.

    Replays a fixed choreography of synchronized motions until ROS shuts down.
    Before every single motion command the thread blocks on a RobotSync barrier
    so that all robots step through the sequence in lockstep.

    Returns 0 (threads ignore the value; kept for symmetry with the siblings).
    """
    try:
        # NOTE(review): nRobotID is 3 although this is robot #6 — the other
        # thread_robot* functions are outside this chunk; confirm that the
        # sync IDs are unique per thread.
        nRobotID = 3
        r = CDsrRobot(robot_id, robot_model)
        '''
        while not rospy.is_shutdown():
            RobotSync.Wait(nRobotID)
            r.movej(J0, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=20, a=20)
        '''
        while not rospy.is_shutdown():
            # Move to the ready pose, then to the start of the linear pass.
            RobotSync.Wait(nRobotID)
            r.movej(JReady, v=20, a=20)
            RobotSync.Wait(nRobotID)
            r.movej(J1, v=0, a=0, t=3)
            RobotSync.Wait(nRobotID)
            r.movel(X3, velx, accx, t=2.5)
            # One blended linear sweep X3 -> X0 and back (radius enables
            # motion blending between segments).
            for i in range(0, 1):
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X0, velx, accx, t=2.5)
                RobotSync.Wait(nRobotID)
                r.movel(X1, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X2, velx, accx, t=1.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
                RobotSync.Wait(nRobotID)
                r.movel(X3, velx, accx, t=2.5, radius=50, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            # Joint-space tour through the J0x waypoints.
            RobotSync.Wait(nRobotID)
            r.movej(J00, v=60, a=60, t=6)
            RobotSync.Wait(nRobotID)
            r.movej(J01r, v=0, a=0, t=2, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J02r, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J03r, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J04r, v=0, a=0, t=1.5)
            RobotSync.Wait(nRobotID)
            r.movej(J04r1, v=0, a=0, t=2, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r2, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r3, v=0, a=0, t=4, radius=50, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J04r4, v=0, a=0, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(J05r, v=0, a=0, t=2.5, radius=100, mod=DR_MV_MOD_ABS)
            # Tool-relative linear offsets.
            RobotSync.Wait(nRobotID)
            r.movel(dREL1, velx, accx, t=1, radius=50, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(dREL2, velx, accx, t=1.5, radius=100, ref=DR_TOOL, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J07r, v=60, a=60, t=1.5, radius=100, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movej(J08r, v=60, a=60, t=2)
            RobotSync.Wait(nRobotID)
            r.movej(JEnd, v=60, a=60, t=4)
            # Demo of the special motion primitives: periodic wiggle + spiral.
            RobotSync.Wait(nRobotID)
            r.move_periodic(amp, period, 0, 1, ref=DR_TOOL)
            RobotSync.Wait(nRobotID)
            r.move_spiral(rev=3, rmax=200, lmax=100, v=vel_spi, a=acc_spi, t=0, axis=DR_AXIS_X, ref=DR_TOOL)
            # Rectangle x01 -> x04 -> x03 -> x02 -> x01, then an arc.
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movel(x04, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x03, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x02, velx, accx, t=2, radius=100, ref=DR_BASE, mod=DR_MV_MOD_ABS)
            RobotSync.Wait(nRobotID)
            r.movel(x01, velx, accx, t=2)
            RobotSync.Wait(nRobotID)
            r.movec(pos1=x02, pos2=x04, v=velx, a=accx, t=4, radius=360, mod=DR_MV_MOD_ABS, ref=DR_BASE)
    except Exception as err:
        # Release the sync barriers so the sibling threads are not left waiting.
        RobotSync.CleanUp()
        rospy.loginfo("Runtime Exception : %s" % err)
    return 0
def shutdown():
    """rospy shutdown hook: quick-stop all six robots."""
    for _ in range(3):
        print("shutdown time!")
    # Publish a quick-stop on every robot's stop topic, in robot order.
    for publisher in (pub_stop_r1, pub_stop_r2, pub_stop_r3,
                      pub_stop_r4, pub_stop_r5, pub_stop_r6):
        publisher.publish(stop_mode=STOP_TYPE_QUICK)
    return 0
if __name__ == "__main__":
    rospy.init_node('car_py')
    rospy.on_shutdown(shutdown)

    robot_id1 = "dsr01"; robot_model1 = "m0617"
    robot_id2 = "dsr02"; robot_model2 = "m1013"
    robot_id3 = "dsr03"; robot_model3 = "m1509"
    robot_id4 = "dsr04"; robot_model4 = "m1013"
    robot_id5 = "dsr05"; robot_model5 = "m1013"
    robot_id6 = "dsr06"; robot_model6 = "m1013"

    # Each stop topic lives under the robot's own "<id><model>" namespace.
    # BUG FIX: r3-r6 previously reused robot_model1/robot_model2 in the topic
    # name, so the quick-stop messages went to namespaces that do not match
    # the robots they are meant to stop.
    pub_stop_r1 = rospy.Publisher('/'+ robot_id1 + robot_model1 +'/stop', RobotStop, queue_size=10)
    pub_stop_r2 = rospy.Publisher('/'+ robot_id2 + robot_model2 +'/stop', RobotStop, queue_size=10)
    pub_stop_r3 = rospy.Publisher('/'+ robot_id3 + robot_model3 +'/stop', RobotStop, queue_size=10)
    pub_stop_r4 = rospy.Publisher('/'+ robot_id4 + robot_model4 +'/stop', RobotStop, queue_size=10)
    pub_stop_r5 = rospy.Publisher('/'+ robot_id5 + robot_model5 +'/stop', RobotStop, queue_size=10)
    pub_stop_r6 = rospy.Publisher('/'+ robot_id6 + robot_model6 +'/stop', RobotStop, queue_size=10)

    #RobotSync = CRobotSync(NUM_ROBOT)
    # NOTE(review): the line above is commented out, yet every worker thread and
    # the main loop below call RobotSync.* — confirm RobotSync is created by one
    # of the wildcard imports, otherwise this script dies with a NameError.

    # One daemon worker per robot so they terminate with the main process.
    workers = []
    for target, rid, rmodel in (
            (thread_robot1, robot_id1, robot_model1),
            (thread_robot2, robot_id2, robot_model2),
            (thread_robot3, robot_id3, robot_model3),
            (thread_robot4, robot_id4, robot_model4),
            (thread_robot5, robot_id5, robot_model5),
            (thread_robot6, robot_id6, robot_model6),
    ):
        t = threading.Thread(target=target, args=(rid, rmodel))
        t.daemon = True
        t.start()
        workers.append(t)

    time.sleep(5)
    # Main loop: keep releasing the sync barriers until ROS shuts down.
    while not rospy.is_shutdown():
        time.sleep(0.01)
        RobotSync.WakeUpAll()
    #----------------------------------------------------------------------
    print('good bye!')
|
test_lock.py | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import random
import threading
import time
from concurrent import futures
import fasteners
from fasteners import test
from fasteners import _utils
# NOTE(harlowja): Sleep a little so now() can not be the same (which will
# cause false positives when our overlap detection code runs). If there are
# real overlaps then they will still exist.
NAPPY_TIME = 0.05  # seconds

# We will spend this amount of time doing some "fake" work.
WORK_TIMES = [(0.01 + x / 100.0) for x in range(0, 5)]  # 0.01..0.05 seconds

# If latches/events take longer than this to become empty/set, something is
# usually wrong and should be debugged instead of deadlocking...
WAIT_TIMEOUT = 300  # seconds
def _find_overlaps(times, start, end):
overlaps = 0
for (s, e) in times:
if s >= start and e <= end:
overlaps += 1
return overlaps
def _spawn_variation(readers, writers, max_workers=None):
    """Run *readers* reader jobs and *writers* writer jobs against one
    ReaderWriterLock and record when each held the lock.

    Returns (writer_times, reader_times): lists of (enter, exit) timestamps.
    """
    timeline = collections.deque()
    rw_lock = fasteners.ReaderWriterLock()

    def _work(ident, kind, guard):
        # Hold the lock for a small deterministic amount of "fake" work and
        # record the held interval tagged with the lock kind.
        with guard():
            # TODO(harlowja): sometime in the future use a monotonic clock here
            # to avoid problems that can be caused by ntpd resyncing the clock
            # while we are actively running.
            began = _utils.now()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            ended = _utils.now()
            timeline.append((kind, began, ended))
        time.sleep(NAPPY_TIME)

    if max_workers is None:
        max_workers = max(0, readers) + max(0, writers)
    if max_workers > 0:
        with futures.ThreadPoolExecutor(max_workers=max_workers) as pool:
            ident = 0
            for _ in range(0, readers):
                pool.submit(_work, ident, rw_lock.READER, rw_lock.read_lock)
                ident += 1
            for _ in range(0, writers):
                pool.submit(_work, ident, rw_lock.WRITER, rw_lock.write_lock)
                ident += 1

    writer_times = [(b, e) for (kind, b, e) in timeline if kind == rw_lock.WRITER]
    reader_times = [(b, e) for (kind, b, e) in timeline if kind != rw_lock.WRITER]
    return (writer_times, reader_times)
def _daemon_thread(target):
t = threading.Thread(target=target)
t.daemon = True
return t
class ReadWriteLockTest(test.TestCase):
    """Stress and semantics tests for fasteners.ReaderWriterLock:
    exclusivity, reentrancy, abort cleanup, and reader/writer fairness.
    """

    # Number of hammering threads used by the stress tests.
    THREAD_COUNT = 20

    def test_no_double_writers(self):
        """Two threads must never hold the write lock at the same time."""
        lock = fasteners.ReaderWriterLock()
        watch = _utils.StopWatch(duration=5)
        watch.start()
        dups = collections.deque()
        active = collections.deque()

        def acquire_check(me):
            with lock.write_lock():
                # Anyone already in `active` overlapped with us -> violation.
                if len(active) >= 1:
                    dups.append(me)
                    dups.extend(active)
                active.append(me)
                try:
                    time.sleep(random.random() / 100)
                finally:
                    active.remove(me)

        def run():
            me = threading.current_thread()
            while not watch.expired():
                acquire_check(me)

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = _daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()
        self.assertEqual([], list(dups))
        self.assertEqual([], list(active))

    def test_no_concurrent_readers_writers(self):
        """A writer must never overlap with any reader or other writer."""
        lock = fasteners.ReaderWriterLock()
        watch = _utils.StopWatch(duration=5)
        watch.start()
        dups = collections.deque()
        active = collections.deque()

        def acquire_check(me, reader):
            if reader:
                lock_func = lock.read_lock
            else:
                lock_func = lock.write_lock
            with lock_func():
                if not reader:
                    # There should be no-one else currently active, if there
                    # is ensure we capture them so that we can later blow-up
                    # the test.
                    if len(active) >= 1:
                        dups.append(me)
                        dups.extend(active)
                active.append(me)
                try:
                    time.sleep(random.random() / 100)
                finally:
                    active.remove(me)

        def run():
            me = threading.current_thread()
            while not watch.expired():
                acquire_check(me, random.choice([True, False]))

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = _daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()
        self.assertEqual([], list(dups))
        self.assertEqual([], list(active))

    def test_writer_abort(self):
        """An exception inside a write lock must still release ownership."""
        lock = fasteners.ReaderWriterLock()
        self.assertFalse(lock.owner)

        def blow_up():
            with lock.write_lock():
                self.assertEqual(lock.WRITER, lock.owner)
                raise RuntimeError("Broken")

        self.assertRaises(RuntimeError, blow_up)
        self.assertFalse(lock.owner)

    def test_reader_abort(self):
        """An exception inside a read lock must still release ownership."""
        lock = fasteners.ReaderWriterLock()
        self.assertFalse(lock.owner)

        def blow_up():
            with lock.read_lock():
                self.assertEqual(lock.READER, lock.owner)
                raise RuntimeError("Broken")

        self.assertRaises(RuntimeError, blow_up)
        self.assertFalse(lock.owner)

    def test_double_reader_abort(self):
        """A nested reader that raises must not block later writers."""
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()

        def double_bad_reader():
            with lock.read_lock():
                with lock.read_lock():
                    raise RuntimeError("Broken")

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        with futures.ThreadPoolExecutor(max_workers=20) as e:
            for i in range(0, 20):
                if i % 2 == 0:
                    e.submit(double_bad_reader)
                else:
                    e.submit(happy_writer)
        # All 10 writers must have gotten through despite the broken readers.
        self.assertEqual(10, len([a for a in activated if a == 'w']))

    def test_double_reader_writer(self):
        """A reentrant reader may re-enter even while a writer is pending."""
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()
        active = threading.Event()

        def double_reader():
            with lock.read_lock():
                active.set()
                # Wait until the writer thread below is queued up.
                while not lock.has_pending_writers:
                    time.sleep(0.001)
                with lock.read_lock():
                    activated.append(lock.owner)

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        reader = _daemon_thread(double_reader)
        reader.start()
        active.wait(WAIT_TIMEOUT)
        self.assertTrue(active.is_set())
        writer = _daemon_thread(happy_writer)
        writer.start()
        reader.join()
        writer.join()
        self.assertEqual(2, len(activated))
        # Reader finished first, writer second.
        self.assertEqual(['r', 'w'], list(activated))

    def test_reader_chaotic(self):
        """Half-broken readers must not prevent writers or healthy readers."""
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()

        def chaotic_reader(blow_up):
            with lock.read_lock():
                if blow_up:
                    raise RuntimeError("Broken")
                else:
                    activated.append(lock.owner)

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        with futures.ThreadPoolExecutor(max_workers=20) as e:
            for i in range(0, 20):
                if i % 2 == 0:
                    e.submit(chaotic_reader, blow_up=bool(i % 4 == 0))
                else:
                    e.submit(happy_writer)
        writers = [a for a in activated if a == 'w']
        readers = [a for a in activated if a == 'r']
        self.assertEqual(10, len(writers))
        self.assertEqual(5, len(readers))

    def test_writer_chaotic(self):
        """Half-broken writers must not prevent readers or healthy writers."""
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()

        def chaotic_writer(blow_up):
            with lock.write_lock():
                if blow_up:
                    raise RuntimeError("Broken")
                else:
                    activated.append(lock.owner)

        def happy_reader():
            with lock.read_lock():
                activated.append(lock.owner)

        with futures.ThreadPoolExecutor(max_workers=20) as e:
            for i in range(0, 20):
                if i % 2 == 0:
                    e.submit(chaotic_writer, blow_up=bool(i % 4 == 0))
                else:
                    e.submit(happy_reader)
        writers = [a for a in activated if a == 'w']
        readers = [a for a in activated if a == 'r']
        self.assertEqual(5, len(writers))
        self.assertEqual(10, len(readers))

    def test_writer_reader_writer(self):
        """A writer may downgrade to reader and re-upgrade reentrantly."""
        lock = fasteners.ReaderWriterLock()
        with lock.write_lock():
            self.assertTrue(lock.is_writer())
            with lock.read_lock():
                self.assertTrue(lock.is_reader())
                with lock.write_lock():
                    self.assertTrue(lock.is_writer())

    def test_single_reader_writer(self):
        """Sequential (non-nested) read/write acquisition in one thread."""
        results = []
        lock = fasteners.ReaderWriterLock()
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertEqual(0, len(results))
        with lock.write_lock():
            results.append(1)
            self.assertTrue(lock.is_writer())
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertEqual(1, len(results))
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())

    def test_reader_to_writer(self):
        """Upgrading a read lock to a write lock is forbidden."""
        lock = fasteners.ReaderWriterLock()

        def writer_func():
            with lock.write_lock():
                pass

        with lock.read_lock():
            self.assertRaises(RuntimeError, writer_func)
            self.assertFalse(lock.is_writer())
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())

    def test_writer_to_reader(self):
        """A writer may take a nested read lock (and keeps writer status)."""
        lock = fasteners.ReaderWriterLock()

        def reader_func():
            with lock.read_lock():
                self.assertTrue(lock.is_writer())
                self.assertTrue(lock.is_reader())

        with lock.write_lock():
            self.assertIsNone(reader_func())
            self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())

    def test_double_writer(self):
        """Write locks are reentrant within one thread."""
        lock = fasteners.ReaderWriterLock()
        with lock.write_lock():
            self.assertFalse(lock.is_reader())
            self.assertTrue(lock.is_writer())
            with lock.write_lock():
                self.assertTrue(lock.is_writer())
            self.assertTrue(lock.is_writer())
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())

    def test_double_reader(self):
        """Read locks are reentrant within one thread."""
        lock = fasteners.ReaderWriterLock()
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertFalse(lock.is_writer())
            with lock.read_lock():
                self.assertTrue(lock.is_reader())
            self.assertTrue(lock.is_reader())
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())

    def test_multi_reader_multi_writer(self):
        """Writers never overlap anyone; readers never overlap writers."""
        writer_times, reader_times = _spawn_variation(10, 10)
        self.assertEqual(10, len(writer_times))
        self.assertEqual(10, len(reader_times))
        for (start, stop) in writer_times:
            self.assertEqual(0, _find_overlaps(reader_times, start, stop))
            # Each writer interval only overlaps itself.
            self.assertEqual(1, _find_overlaps(writer_times, start, stop))
        for (start, stop) in reader_times:
            self.assertEqual(0, _find_overlaps(writer_times, start, stop))

    def test_multi_reader_single_writer(self):
        """A single writer must not overlap any of the readers."""
        writer_times, reader_times = _spawn_variation(9, 1)
        self.assertEqual(1, len(writer_times))
        self.assertEqual(9, len(reader_times))
        start, stop = writer_times[0]
        self.assertEqual(0, _find_overlaps(reader_times, start, stop))

    def test_multi_writer(self):
        """With only writers, every interval overlaps exactly itself."""
        writer_times, reader_times = _spawn_variation(0, 10)
        self.assertEqual(10, len(writer_times))
        self.assertEqual(0, len(reader_times))
        for (start, stop) in writer_times:
            self.assertEqual(1, _find_overlaps(writer_times, start, stop))
|
main1.py | from flask import Flask, render_template
from flask.ext.socketio import SocketIO, send, emit
from twisted.internet.protocol import Protocol, Factory
from twisted.internet import reactor
from threading import Thread
import RPi.GPIO as GPIO
import datetime
import os
GPIO.setmode(GPIO.BCM)  ## Use BCM (Broadcom) channel numbering, not physical board numbering
##GPIO.setup(7, GPIO.OUT) ## Setup GPIO Pin 7 to OUT
# Pin 2 is an input with the internal pull-up enabled.
# NOTE(review): the commented-out doorSensor code at the bottom of this file
# treats LOW on this pin as "door opened" — confirm against the wiring.
GPIO.setup(2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Shared flag: True means we are armed to log the next door-open event.
reset = True
def twistedServer():
def RaspberryLight(Protocol):
def connectionMade(self):
#self.transport.write("""connected""")
self.factory.clients.append(self)
print "clients are ", self.factory.clients
def connectionLost(self, reason):
print "connection lost ", self.factory.clients
self.factory.clients.remove(self)
def dataReceived(self, data):
global reset
msg = ""
## if (data == 'P7H'):
## msg = "Pin 7 is now High"
## GPIO.output(7, True)
##
## elif (data == 'P7L'):
## msg = "Pin 7 is now Low"
## GPIO.output(7, False)
if (data == 'test'):
msg = "YAY THE PHONE SENT A MESSAGE as;dfjasl;ldjflkasdjfasjdflsajflksajdlfjasdkfjas;l"
elif (data == 'reset door'):
reset = True
print "reset door"
print msg
factory = Factory()
factory.protocol = RaspberryLight
factory.clients = []
reactor.listenTCP(7777, factory)
print "RaspberryLight server started"
reactor.run()
def socketServer():
    """Run the Flask + Flask-SocketIO web front-end (blocking).

    Serves two pages and logs SocketIO connect events. The __name__ guard at
    the bottom means the server only starts when this module is the script
    being executed (the function is invoked from a thread in that case).
    """
    app = Flask(__name__)
    socketio = SocketIO(app)

    @socketio.on('appConnected')
    def app_connected(json):
        # Payload sent by the mobile app on connect; only logged for now.
        print('received json: ' + str(json))

    @socketio.on('connect', namespace="/main")
    def startSocket():
        print "test message2"

    @app.route('/')
    def load():
        return render_template("betaIndex.html")

    @app.route('/launch')
    def loadLaunch():
        return render_template("testLaunch.html")

    # Blocks here serving HTTP/WebSocket on port 80 until the process exits.
    if __name__ == "__main__":
        socketio.run(app, "localhost", 80)
# Run both servers concurrently: the Twisted TCP control server and the
# Flask-SocketIO web server. Neither thread is daemonized, so the process
# stays alive as long as either server runs.
twisted = Thread(target=twistedServer)
twisted.start()
socket = Thread(target=socketServer)
socket.start()
##def doorSensor():
## global reset
## while True:
## if reset == True:
## if GPIO.input(2) == GPIO.LOW:
## doorLog = open("Door_Log.txt", "w")
## doorLog.write("Door Opened At: " + datetime.datetime.strftime(datetime.datetime.now(), "%Y-%m-%d %H:%M:%S"))
## doorLog.close()
## reset = False
## else:
## print "it's closed"
##
##door = Thread(target=doorSensor)
##door.daemon = True
##door.start()
|
dynamic_teacher.py | import time
from typing import Any, Dict, Iterator, List
import copy
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import traceback
from allennlp.data.data_loaders.multiprocess_data_loader import WorkerError
from allennlp.nn.util import move_to_device
from matchmaker.utils.config import *
from matchmaker.models.all import get_model, get_word_embedder, build_model
from allennlp.data.data_loaders.data_loader import DataLoader, TensorDict
from transformers.file_utils import cached_path,hf_bucket_url,WEIGHTS_NAME
mp.set_sharing_strategy("file_system") # VERY MUCH needed for linux !! makes everything MUCH faster
from torch.nn.parallel.scatter_gather import scatter_kwargs, gather
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.cuda._utils import _get_device_index
from rich.console import Console
def data_parallel_prepare(module, device_ids=None):
    r"""Replicate ``module`` onto every GPU in ``device_ids``.

    This is the "prepare" half of a split DataParallel: it only builds the
    per-device replicas once, so ``data_parallel_forward`` can reuse them on
    every batch instead of re-replicating each call.

    Args:
        module (Module): the module to replicate.
        device_ids (list of int or torch.device): GPU ids to replicate onto.
            Must not be ``None`` — unlike ``torch.nn.parallel.data_parallel``,
            no default device discovery is performed here.

    Returns:
        A list of module replicas, one per entry in ``device_ids``.
    """
    # Previous version carried ~20 lines of dead commented-out code and a
    # docstring describing parameters (inputs, output_device) that this
    # function never had; both removed.
    # Normalize devices (torch.device objects or ints) to integer indices.
    device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
    return replicate(module, device_ids)
def data_parallel_forward(replicas, inputs, device_ids=None, output_device=None, dim=0, module_kwargs=None):
    r"""Run pre-built module replicas in parallel across GPUs and gather the result.

    This is the "forward" half of a split DataParallel: ``replicas`` must come
    from :func:`data_parallel_prepare` for the same ``device_ids``.

    Args:
        replicas: per-device module replicas from ``data_parallel_prepare``.
        inputs (Tensor or tuple): inputs to scatter across the devices.
        device_ids (list of int or torch.device): GPUs the replicas live on.
            Must not be ``None`` — no default device discovery is performed.
        output_device (int or torch.device): where to gather the outputs
            (default: ``device_ids[0]``).
        dim (int): dimension along which inputs are scattered / outputs gathered.
        module_kwargs (dict): keyword arguments scattered alongside ``inputs``.

    Returns:
        a Tensor containing the gathered result located on ``output_device``.
    """
    # Previous version carried a stale docstring ("module" parameter it does
    # not have) plus many lines of dead commented-out code; both removed.
    if not isinstance(inputs, tuple):
        inputs = (inputs,)
    if output_device is None:
        output_device = device_ids[0]
    device_ids = list(map(lambda x: _get_device_index(x, True), device_ids))
    output_device = _get_device_index(output_device, True)
    inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim)
    # Only use as many replicas/devices as there are scattered input chunks.
    used_device_ids = device_ids[:len(inputs)]
    outputs = parallel_apply(replicas, inputs, module_kwargs, used_device_ids)
    return gather(outputs, output_device, dim)
class DynamicTeacher():
    '''
    Wraps a trained model checkpoint and the training batch queue to score (inference only) samples from the batch.

    Iterating a DynamicTeacher spawns a subprocess that loads the teacher
    checkpoint on the last GPU(s), runs inference over every batch of the
    wrapped dataloader, attaches "dyn_teacher_*" score tensors to each batch,
    and streams the augmented batches back through a joinable queue.
    '''

    def __init__(self,
                 config: Dict[str, Any],
                 dataloader: DataLoader,
                 logger):
        super().__init__()
        self.config = config
        # Path (local dir or HF model id) of the teacher checkpoint.
        self.dynamic_teacher_path = config["dynamic_teacher_path"]
        # If True, also compute in-batch aggregation scores (ColBERT-style).
        self.dynamic_teacher_in_batch_scoring = config["dynamic_teacher_in_batch_scoring"]
        # If True, the teacher additionally returns per-term scores.
        self.dynamic_teacher_per_term_scores = config.get("dynamic_teacher_per_term_scores", False)
        self.wrapped_dataloader = dataloader
        self.cuda_device = torch.cuda.device_count() - 1  # [torch.cuda.device_count() - 2,torch.cuda.device_count() - 1] # take the last gpu
        self.logger = logger

    def __iter__(self) -> Iterator[TensorDict]:
        """Yield teacher-scored batches produced by the inference subprocess."""
        ctx = mp.get_context("spawn")  # need spawn here, otherwise CUDA fails
        queue: mp.JoinableQueue = ctx.JoinableQueue()
        worker = ctx.Process(
            target=self.dynamic_teacher_subprocess, args=(queue,),  # daemon=True
        )
        worker.start()
        try:
            # (None, None) is the end-of-stream sentinel put by the subprocess.
            for batch, worker_error in iter(queue.get, (None, None)):
                if worker_error is not None:
                    # Re-raise subprocess exceptions (repr + traceback string).
                    e, tb = worker_error
                    raise WorkerError(e, tb)
                yield batch
                queue.task_done()
        finally:
            if hasattr(queue, "close"):  # for compat with different Python versions.
                queue.close()  # type: ignore[attr-defined]
            if worker.is_alive():
                worker.terminate()

    def dynamic_teacher_subprocess(self, queue):
        """Subprocess body: load the teacher, score every batch, feed the queue.

        Errors are reported back through the queue as (None, (repr, traceback))
        rather than raised, so the parent iterator can surface them.
        """
        try:
            console = Console()
            console.log("[DynamicTeacher] Load teacher model from: " + self.dynamic_teacher_path)

            #
            # load model
            #
            model_config = get_config_single(self.dynamic_teacher_path)
            word_embedder, padding_idx = get_word_embedder(model_config)
            model, encoder_type = get_model(model_config, word_embedder, padding_idx)
            model = build_model(model, encoder_type, word_embedder, model_config)
            model.is_teacher_model = True

            if model_config.get("model_checkpoint_from_huggingface", False):
                model_path = cached_path(hf_bucket_url(self.dynamic_teacher_path, WEIGHTS_NAME))
            else:
                # NOTE(review): 'os' is not imported in this module's visible
                # import block — presumably provided by a wildcard import;
                # verify.
                model_path = os.path.join(self.dynamic_teacher_path, "best-model.pytorch-state-dict")
            load_result = model.load_state_dict(torch.load(model_path, map_location="cpu"), strict=False)
            self.logger.info('[DynamicTeacher] Warmstart init model from: %s', model_path)
            self.logger.info(load_result)
            console.log("[DynamicTeacher] Warmstart Result:", load_result)
            model = model.eval()

            use_multi = False
            if type(self.cuda_device) == int:
                model = model.cuda(self.cuda_device)
            #
            # multi gpu
            #
            else:
                use_multi = True
                model = model.cuda(self.cuda_device[0])
                replicas = data_parallel_prepare(model, self.cuda_device)

            use_fp16 = model_config["use_fp16"]
            # "bert_cat" models take one concatenated query+doc sequence;
            # everything else takes (query, doc) pairs.
            concated_sequences = False
            if model_config["token_embedder_type"] == "bert_cat":
                concated_sequences = True
            #use_title_body_sep = model_config["use_title_body_sep"]
            #train_sparsity = model_config["minimize_sparsity"]
            #train_qa_spans = model_config["train_qa_spans"]

            #
            # connect to pipeline
            #
            console.log("[DynamicTeacher] Run Teacher Inference ...")
            with torch.no_grad(), torch.cuda.amp.autocast(enabled=use_fp16):
                for orig_batch in self.wrapped_dataloader:
                    # Keep orig_batch on CPU; score on copies moved to GPU(s).
                    if use_multi:
                        batch = move_to_device(copy.deepcopy(orig_batch), self.cuda_device[0])
                        batch_neg = move_to_device(copy.deepcopy(orig_batch), self.cuda_device[1])
                    else:
                        batch = move_to_device(copy.deepcopy(orig_batch), self.cuda_device)
                        batch_neg = batch

                    pos_in = []
                    neg_in = []
                    if concated_sequences:
                        pos_in.append(batch["doc_pos_tokens"])
                        neg_in.append(batch_neg["doc_neg_tokens"])
                    else:
                        pos_in += [batch["query_tokens"], batch["doc_pos_tokens"]]
                        neg_in += [batch_neg["query_tokens"], batch_neg["doc_neg_tokens"]]
                    #if use_title_body_sep:
                    #    pos_in.append(batch["title_pos_tokens"])
                    #    neg_in.append(batch_neg["title_neg_tokens"])
                    #if train_qa_spans: # add start positions for qa training (used to anchor end logits on the start ground truth)
                    #    pos_in.append(batch["pos_qa_start"])

                    #
                    # run model forward
                    #
                    if use_multi:
                        # One replica scores the positive pair, one the negative.
                        output_pos, output_neg = parallel_apply(replicas, [pos_in, neg_in], [{"use_fp16": use_fp16}, {"use_fp16": use_fp16}], self.cuda_device)
                        #output_neg = data_parallel_forward(replicas, *neg_in, device_ids=cuda_device, use_fp16 = use_fp16)
                        output_pos, query_vecs_pos, doc_vecs_pos = output_pos
                        output_neg, query_vecs_neg, doc_vecs_neg = output_neg
                        # colbert model
                        ib_output_pos = model.forward_inbatch_aggregation(query_vecs_pos, batch["query_tokens"]["attention_mask"], doc_vecs_pos, batch["doc_pos_tokens"]["attention_mask"])
                        ib_output_neg = model.forward_inbatch_aggregation(query_vecs_neg, batch["query_tokens"]["attention_mask"], doc_vecs_neg, batch["doc_neg_tokens"]["attention_mask"])
                        orig_batch["dyn_teacher_scores_pos"] = ib_output_pos.cpu()
                        orig_batch["dyn_teacher_scores_neg"] = ib_output_neg.cpu()
                    else:
                        output_pos = model.forward(*pos_in, use_fp16=use_fp16)
                        output_neg = model.forward(*neg_in, use_fp16=use_fp16)
                        #if train_qa_spans:
                        #    output,answerability,qa_logits_start,qa_logits_end = output
                        #answerability = answerability.cpu().float()
                        #qa_logits_start = qa_logits_start.cpu().float()
                        #qa_logits_end = qa_logits_end.cpu().float()
                        #if train_sparsity:
                        #    output, cache_parts_out, sparsity_vec,sparsity_stats = output
                        if self.dynamic_teacher_per_term_scores:
                            # Teacher returns (..., per_term_scores); split it off.
                            (*output_pos, per_term_scores_pos) = output_pos
                            (*output_neg, per_term_scores_neg) = output_neg
                            orig_batch["dyn_teacher_per_term_scores_pos"] = per_term_scores_pos.cpu()
                            orig_batch["dyn_teacher_per_term_scores_neg"] = per_term_scores_neg.cpu()
                        if self.dynamic_teacher_in_batch_scoring:
                            output_pos, query_vecs_pos, doc_vecs_pos = output_pos
                            output_neg, query_vecs_neg, doc_vecs_neg = output_neg
                            # colbert model
                            ib_output_pos = model.forward_inbatch_aggregation(query_vecs_pos, batch["query_tokens"]["attention_mask"], doc_vecs_pos, batch["doc_pos_tokens"]["attention_mask"])
                            ib_output_neg = model.forward_inbatch_aggregation(query_vecs_neg, batch["query_tokens"]["attention_mask"], doc_vecs_neg, batch["doc_neg_tokens"]["attention_mask"])
                            orig_batch["dyn_teacher_scores_pos"] = ib_output_pos.cpu()
                            orig_batch["dyn_teacher_scores_neg"] = ib_output_neg.cpu()
                        else:
                            orig_batch["dyn_teacher_scores_pos"] = output_pos.cpu()
                            orig_batch["dyn_teacher_scores_neg"] = output_neg.cpu()

                    queue.put((orig_batch, None))  # this moves the tensors in to shared memory

        except Exception as e:
            queue.put((None, (repr(e), traceback.format_exc())))

        # End-of-stream sentinel for the consuming iterator.
        queue.put((None, None))

        # Wait until this process can safely exit.
        queue.join()
test_ssl.py | import pytest
import threading
import socket as stdlib_socket
import ssl
from contextlib import contextmanager
from functools import partial
from OpenSSL import SSL
import trustme
from async_generator import asynccontextmanager
import trio
from .. import _core
from .._highlevel_socket import SocketStream, SocketListener
from .._highlevel_generic import aclose_forcefully
from .._core import ClosedResourceError, BrokenResourceError
from .._highlevel_open_tcp_stream import open_tcp_stream
from .. import socket as tsocket
from .._ssl import SSLStream, SSLListener, NeedHandshakeError
from .._util import ConflictDetector
from .._core.tests.tutil import slow
from ..testing import (
assert_checkpoints,
Sequencer,
memory_stream_pair,
lockstep_stream_pair,
check_two_way_stream,
)
# We have two different kinds of echo server fixtures we use for testing. The
# first is a real server written using the stdlib ssl module and blocking
# sockets. It runs in a thread and we talk to it over a real socketpair(), to
# validate interoperability in a semi-realistic setting.
#
# The second is a very weird virtual echo server that lives inside a custom
# Stream class. It lives entirely inside the Python object space; there are no
# operating system calls in it at all. No threads, no I/O, nothing. It's
# 'send_all' call takes encrypted data from a client and feeds it directly into
# the server-side TLS state engine to decrypt, then takes that data, feeds it
# back through to get the encrypted response, and returns it from 'receive_some'. This
# gives us full control and reproducibility. This server is written using
# PyOpenSSL, so that we can trigger renegotiations on demand. It also allows
# us to insert random (virtual) delays, to really exercise all the weird paths
# in SSLStream's state engine.
#
# Both present a certificate for "trio-test-1.example.org".
# A throwaway test CA and a server certificate for "trio-test-1.example.org",
# shared by every server fixture in this module.
TRIO_TEST_CA = trustme.CA()
TRIO_TEST_1_CERT = TRIO_TEST_CA.issue_server_cert("trio-test-1.example.org")

SERVER_CTX = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
TRIO_TEST_1_CERT.configure_cert(SERVER_CTX)

# TLS 1.3 has a lot of changes from previous versions. So we want to run tests
# with both TLS 1.3, and TLS 1.2.
if hasattr(ssl, "OP_NO_TLSv1_3"):
    # "tls13" means that we're willing to negotiate TLS 1.3. Usually that's
    # what will happen, but the renegotiation tests explicitly force a
    # downgrade on the server side. "tls12" means we refuse to negotiate TLS
    # 1.3, so we'll almost certainly use TLS 1.2.
    client_ctx_params = ["tls13", "tls12"]
else:
    # We can't control whether we use TLS 1.3, so we just have to accept
    # whatever openssl wants to use. This might be TLS 1.2 (if openssl is
    # old), or it might be TLS 1.3 (if openssl is new, but our python version
    # is too old to expose the configuration knobs).
    client_ctx_params = ["default"]
@pytest.fixture(scope="module", params=client_ctx_params)
def client_ctx(request):
    """Client-side SSLContext trusting the test CA; parametrized so the suite
    runs against both TLS 1.3 (when available) and forced TLS 1.2."""
    ctx = ssl.create_default_context()
    TRIO_TEST_CA.configure_trust(ctx)
    param = request.param
    if param == "tls12":
        # Refuse TLS 1.3 so the handshake falls back to TLS 1.2.
        ctx.options |= ssl.OP_NO_TLSv1_3
        return ctx
    if param in ("default", "tls13"):
        return ctx
    assert False  # pragma: no cover
# The blocking socket server.
def ssl_echo_serve_sync(sock, *, expect_fail=False):
    """Blocking TLS echo server, meant to run in a background thread.

    Wraps ``sock`` with SERVER_CTX, echoes every received chunk back to the
    client, and attempts a graceful TLS shutdown when the client closes.
    Always closes ``sock`` before returning.

    If ``expect_fail`` is true, an error during serving is considered the
    expected outcome, and *not* failing raises RuntimeError (used by tests
    that deliberately break the handshake).
    """
    try:
        wrapped = SERVER_CTX.wrap_socket(
            sock, server_side=True, suppress_ragged_eofs=False
        )
        with wrapped:
            wrapped.do_handshake()
            while True:
                data = wrapped.recv(4096)
                if not data:
                    # other side has initiated a graceful shutdown; we try to
                    # respond in kind but it's legal for them to have already
                    # gone away.
                    exceptions = (BrokenPipeError, ssl.SSLZeroReturnError)
                    # Under unclear conditions, CPython sometimes raises
                    # SSLWantWriteError here. This is a bug (bpo-32219), but
                    # it's not our bug, so ignore it.
                    exceptions += (ssl.SSLWantWriteError,)
                    try:
                        wrapped.unwrap()
                    except exceptions:
                        pass
                    return
                wrapped.sendall(data)
    # This is an obscure workaround for an openssl bug. In server mode, in
    # some versions, openssl sends some extra data at the end of do_handshake
    # that it shouldn't send. Normally this is harmless, but, if the other
    # side shuts down the connection before it reads that data, it might cause
    # the OS to report a ECONNRESET or even ECONNABORTED (which is just wrong,
    # since ECONNABORTED is supposed to mean that connect() failed, but what
    # can you do). In this case the other side did nothing wrong, but there's
    # no way to recover, so we let it pass, and just cross our fingers it's not
    # hiding any (other) real bugs. For more details see:
    #
    # https://github.com/python-trio/trio/issues/1293
    #
    # Also, this happens frequently but non-deterministically, so we have to
    # 'no cover' it to avoid coverage flapping.
    except (ConnectionResetError, ConnectionAbortedError):  # pragma: no cover
        return
    except Exception as exc:
        if expect_fail:
            print("ssl_echo_serve_sync got error as expected:", exc)
        else:  # pragma: no cover
            print("ssl_echo_serve_sync got unexpected error:", exc)
            raise
    else:
        if expect_fail:  # pragma: no cover
            raise RuntimeError("failed to fail?")
    finally:
        sock.close()
# Fixture that gives a raw socket connected to a trio-test-1 echo server
# (running in a thread). Useful for testing making connections with different
# SSLContexts.
@asynccontextmanager
async def ssl_echo_server_raw(**kwargs):
    """Async context manager yielding a raw SocketStream connected to a
    trio-test-1 echo server running in a background thread.

    ``kwargs`` are forwarded to ssl_echo_serve_sync (e.g. expect_fail=True).
    """
    client_sock, server_sock = stdlib_socket.socketpair()
    async with trio.open_nursery() as nursery:
        # Exiting the inner 'with' closes both sockets, which causes the
        # server thread to exit (possibly with an error), which in turn lets
        # the nursery context manager exit.
        with client_sock, server_sock:
            nursery.start_soon(
                trio.to_thread.run_sync,
                partial(ssl_echo_serve_sync, server_sock, **kwargs),
            )
            yield SocketStream(tsocket.from_stdlib_socket(client_sock))
# Fixture that gives a properly set up SSLStream connected to a trio-test-1
# echo server (running in a thread)
@asynccontextmanager
async def ssl_echo_server(client_ctx, **kwargs):
    """Like ssl_echo_server_raw, but yields the transport already wrapped in
    a properly configured client-side SSLStream."""
    async with ssl_echo_server_raw(**kwargs) as transport:
        yield SSLStream(
            transport, client_ctx, server_hostname="trio-test-1.example.org"
        )
# The weird in-memory server ... thing.
# Doesn't inherit from Stream because I left out the methods that we don't
# actually need.
class PyOpenSSLEchoStream:
    """Virtual in-memory TLS echo server, implemented with PyOpenSSL.

    Implements just enough of the Stream interface for SSLStream to drive it:
    ``send_all`` feeds ciphertext into the server-side TLS engine and echoes
    the decrypted bytes back through it; ``receive_some`` hands out the
    resulting ciphertext. PyOpenSSL (rather than stdlib ssl) is used so that
    tests can trigger renegotiations on demand, and the optional ``sleeper``
    hook injects virtual delays at named points to exercise odd interleavings
    in SSLStream's state engine.
    """

    def __init__(self, sleeper=None):
        # sleeper: optional async callable invoked with a label
        # ("send_all", "receive_some", ...) at interesting points, so tests
        # can insert virtual delays.
        ctx = SSL.Context(SSL.SSLv23_METHOD)
        # TLS 1.3 removes renegotiation support. Which is great for them, but
        # we still have to support versions before that, and that means we
        # need to test renegotiation support, which means we need to force this
        # to use a lower version where this test server can trigger
        # renegotiations. Of course TLS 1.3 support isn't released yet, but
        # I'm told that this will work once it is. (And once it is we can
        # remove the pragma: no cover too.) Alternatively, we could switch to
        # using TLSv1_2_METHOD.
        #
        # Discussion: https://github.com/pyca/pyopenssl/issues/624

        # This is the right way, but we can't use it until this PR is in a
        # release:
        # https://github.com/pyca/pyopenssl/pull/861
        #
        # if hasattr(SSL, "OP_NO_TLSv1_3"):
        #     ctx.set_options(SSL.OP_NO_TLSv1_3)
        #
        # Fortunately pyopenssl uses cryptography under the hood, so we can be
        # confident that they're using the same version of openssl
        from cryptography.hazmat.bindings.openssl.binding import Binding

        b = Binding()
        if hasattr(b.lib, "SSL_OP_NO_TLSv1_3"):
            ctx.set_options(b.lib.SSL_OP_NO_TLSv1_3)

        # Unfortunately there's currently no way to say "use 1.3 or worse", we
        # can only disable specific versions. And if the two sides start
        # negotiating 1.4 at some point in the future, it *might* mean that
        # our tests silently stop working properly. So the next line is a
        # tripwire to remind us we need to revisit this stuff in 5 years or
        # whatever when the next TLS version is released:
        assert not hasattr(SSL, "OP_NO_TLSv1_4")

        TRIO_TEST_1_CERT.configure_cert(ctx)
        # Server-side TLS engine, fed through memory BIOs (no real socket).
        self._conn = SSL.Connection(ctx, None)
        self._conn.set_accept_state()
        # Parking lot used to block receive_some() until send_all() has
        # produced something.
        self._lot = _core.ParkingLot()
        # Decrypted bytes waiting to be echoed back through the TLS engine.
        self._pending_cleartext = bytearray()

        self._send_all_conflict_detector = ConflictDetector(
            "simultaneous calls to PyOpenSSLEchoStream.send_all"
        )
        self._receive_some_conflict_detector = ConflictDetector(
            "simultaneous calls to PyOpenSSLEchoStream.receive_some"
        )

        if sleeper is None:

            async def no_op_sleeper(_):
                return

            self.sleeper = no_op_sleeper
        else:
            self.sleeper = sleeper

    async def aclose(self):
        # Shut down the memory BIO; the TLS engine then sees the "peer" as
        # having gone away.
        self._conn.bio_shutdown()

    def renegotiate_pending(self):
        # True while a renegotiation triggered by renegotiate() is still in
        # flight.
        return self._conn.renegotiate_pending()

    def renegotiate(self):
        # Returns false if a renegotiation is already in progress, meaning
        # nothing happens.
        assert self._conn.renegotiate()

    async def wait_send_all_might_not_block(self):
        # The in-memory "transport" is always writable; just checkpoint twice
        # (mirroring send_all) and give the sleeper a chance to interleave.
        with self._send_all_conflict_detector:
            await _core.checkpoint()
            await _core.checkpoint()
            await self.sleeper("wait_send_all_might_not_block")

    async def send_all(self, data):
        """Feed client ciphertext into the TLS engine; echo any decrypted
        bytes into _pending_cleartext for receive_some() to re-encrypt."""
        print(" --> transport_stream.send_all")
        with self._send_all_conflict_detector:
            await _core.checkpoint()
            await _core.checkpoint()
            await self.sleeper("send_all")
            self._conn.bio_write(data)
            while True:
                await self.sleeper("send_all")
                try:
                    data = self._conn.recv(1)
                except SSL.ZeroReturnError:
                    # Clean TLS shutdown from the client; answer in kind.
                    self._conn.shutdown()
                    print("renegotiations:", self._conn.total_renegotiations())
                    break
                except SSL.WantReadError:
                    break
                else:
                    self._pending_cleartext += data
            # Wake any receive_some() parked waiting for data to appear.
            self._lot.unpark_all()
            await self.sleeper("send_all")
            print(" <-- transport_stream.send_all finished")

    async def receive_some(self, nbytes=None):
        """Return ciphertext produced by the TLS engine, parking until
        send_all() generates some."""
        print(" --> transport_stream.receive_some")
        if nbytes is None:
            nbytes = 65536  # arbitrary
        with self._receive_some_conflict_detector:
            try:
                await _core.checkpoint()
                await _core.checkpoint()
                while True:
                    await self.sleeper("receive_some")
                    try:
                        return self._conn.bio_read(nbytes)
                    except SSL.WantReadError:
                        # No data in our ciphertext buffer; try to generate
                        # some.
                        if self._pending_cleartext:
                            # We have some cleartext; maybe we can encrypt it
                            # and then return it.
                            print(" trying", self._pending_cleartext)
                            try:
                                # PyOpenSSL bug: doesn't accept bytearray
                                # https://github.com/pyca/pyopenssl/issues/621
                                next_byte = self._pending_cleartext[0:1]
                                self._conn.send(bytes(next_byte))
                            # Apparently this next bit never gets hit in the
                            # test suite, but it's not an interesting omission
                            # so let's pragma it.
                            except SSL.WantReadError:  # pragma: no cover
                                # We didn't manage to send the cleartext (and
                                # in particular we better leave it there to
                                # try again, due to openssl's retry
                                # semantics), but it's possible we pushed a
                                # renegotiation forward and *now* we have data
                                # to send.
                                try:
                                    return self._conn.bio_read(nbytes)
                                except SSL.WantReadError:
                                    # Nope. We're just going to have to wait
                                    # for someone to call send_all() to give
                                    # use more data.
                                    print("parking (a)")
                                    await self._lot.park()
                            else:
                                # We successfully sent that byte, so we don't
                                # have to again.
                                del self._pending_cleartext[0:1]
                        else:
                            # no pending cleartext; nothing to do but wait for
                            # someone to call send_all
                            print("parking (b)")
                            await self._lot.park()
            finally:
                await self.sleeper("receive_some")
                print(" <-- transport_stream.receive_some finished")
async def test_PyOpenSSLEchoStream_gives_resource_busy_errors():
    """Make sure PyOpenSSLEchoStream complains if two tasks call send_all at
    the same time, or ditto for receive_some. The tricky cases where SSLStream
    might accidentally do this are during renegotiation, which we test using
    PyOpenSSLEchoStream, so this makes sure that if we do have a bug then
    PyOpenSSLEchoStream will notice and complain.
    """
    conflicting_pairs = [
        ("send_all", (b"x",), "send_all", (b"x",)),
        ("send_all", (b"x",), "wait_send_all_might_not_block", ()),
        ("wait_send_all_might_not_block", (), "wait_send_all_might_not_block", ()),
        ("receive_some", (1,), "receive_some", (1,)),
    ]
    for name1, args1, name2, args2 in conflicting_pairs:
        stream = PyOpenSSLEchoStream()
        with pytest.raises(_core.BusyResourceError) as excinfo:
            async with _core.open_nursery() as nursery:
                nursery.start_soon(getattr(stream, name1), *args1)
                nursery.start_soon(getattr(stream, name2), *args2)
        assert "simultaneous" in str(excinfo.value)
@contextmanager
def virtual_ssl_echo_server(client_ctx, **kwargs):
    """Yield an SSLStream wrapped around an in-memory PyOpenSSLEchoStream.

    ``kwargs`` are forwarded to PyOpenSSLEchoStream (e.g. sleeper=...).
    """
    server = PyOpenSSLEchoStream(**kwargs)
    yield SSLStream(server, client_ctx, server_hostname="trio-test-1.example.org")
def ssl_wrap_pair(
    client_ctx,
    client_transport,
    server_transport,
    *,
    client_kwargs=None,
    server_kwargs=None,
):
    """Wrap a pair of raw transports in client/server SSLStreams.

    client_kwargs / server_kwargs are extra keyword arguments forwarded to
    the respective SSLStream constructors. Returns (client_ssl, server_ssl).

    Fix: the defaults used to be mutable ``{}`` literals, which are shared
    across calls in Python; use None sentinels instead (behavior unchanged,
    since the dicts were never mutated here, but the idiom is unsafe).
    """
    client_kwargs = {} if client_kwargs is None else client_kwargs
    server_kwargs = {} if server_kwargs is None else server_kwargs
    client_ssl = SSLStream(
        client_transport,
        client_ctx,
        server_hostname="trio-test-1.example.org",
        **client_kwargs,
    )
    server_ssl = SSLStream(
        server_transport, SERVER_CTX, server_side=True, **server_kwargs
    )
    return client_ssl, server_ssl
def ssl_memory_stream_pair(client_ctx, **kwargs):
    """Return a connected (client_ssl, server_ssl) pair over memory streams."""
    left, right = memory_stream_pair()
    return ssl_wrap_pair(client_ctx, left, right, **kwargs)
def ssl_lockstep_stream_pair(client_ctx, **kwargs):
    """Return a connected (client_ssl, server_ssl) pair over lockstep streams."""
    left, right = lockstep_stream_pair()
    return ssl_wrap_pair(client_ctx, left, right, **kwargs)
# Simple smoke test for handshake/send/receive/shutdown talking to a
# synchronous server, plus make sure that we do the bare minimum of
# certificate checking (even though this is really Python's responsibility)
async def test_ssl_client_basics(client_ctx):
    """Smoke test: handshake/send/receive/close against the threaded echo
    server, plus the bare minimum of certificate checking (untrusted CA and
    hostname mismatch must both fail)."""
    # Everything OK
    async with ssl_echo_server(client_ctx) as s:
        assert not s.server_side
        await s.send_all(b"x")
        assert await s.receive_some(1) == b"x"
        await s.aclose()

    # Didn't configure the CA file, should fail
    async with ssl_echo_server_raw(expect_fail=True) as sock:
        bad_client_ctx = ssl.create_default_context()
        s = SSLStream(sock, bad_client_ctx, server_hostname="trio-test-1.example.org")
        assert not s.server_side
        with pytest.raises(BrokenResourceError) as excinfo:
            await s.send_all(b"x")
        assert isinstance(excinfo.value.__cause__, ssl.SSLError)

    # Trusted CA, but wrong host name
    async with ssl_echo_server_raw(expect_fail=True) as sock:
        s = SSLStream(sock, client_ctx, server_hostname="trio-test-2.example.org")
        assert not s.server_side
        with pytest.raises(BrokenResourceError) as excinfo:
            await s.send_all(b"x")
        assert isinstance(excinfo.value.__cause__, ssl.CertificateError)
async def test_ssl_server_basics(client_ctx):
    """Server-side SSLStream talking to a synchronous stdlib-ssl client
    running in a thread. The thread body mirrors the awaits below, step by
    step: x -> y -> z -> graceful unwrap/EOF."""
    a, b = stdlib_socket.socketpair()
    with a, b:
        server_sock = tsocket.from_stdlib_socket(b)
        server_transport = SSLStream(
            SocketStream(server_sock), SERVER_CTX, server_side=True
        )
        assert server_transport.server_side

        def client():
            # Runs in a thread; each send/recv pairs with an await below.
            with client_ctx.wrap_socket(
                a, server_hostname="trio-test-1.example.org"
            ) as client_sock:
                client_sock.sendall(b"x")
                assert client_sock.recv(1) == b"y"
                client_sock.sendall(b"z")
                client_sock.unwrap()

        t = threading.Thread(target=client)
        t.start()

        assert await server_transport.receive_some(1) == b"x"
        await server_transport.send_all(b"y")
        assert await server_transport.receive_some(1) == b"z"
        # Client's unwrap() gives us a clean EOF.
        assert await server_transport.receive_some(1) == b""
        await server_transport.aclose()

        t.join()
async def test_attributes(client_ctx):
    """SSLStream attribute forwarding: getting, __dir__, and setting are all
    delegated to the underlying SSLObject."""
    async with ssl_echo_server_raw(expect_fail=True) as sock:
        good_ctx = client_ctx
        bad_ctx = ssl.create_default_context()
        s = SSLStream(sock, good_ctx, server_hostname="trio-test-1.example.org")

        assert s.transport_stream is sock

        # Forwarded attribute getting
        assert s.context is good_ctx
        assert s.server_side == False  # noqa
        assert s.server_hostname == "trio-test-1.example.org"
        with pytest.raises(AttributeError):
            s.asfdasdfsa

        # __dir__
        assert "transport_stream" in dir(s)
        assert "context" in dir(s)

        # Setting the attribute goes through to the underlying object
        # most attributes on SSLObject are read-only
        with pytest.raises(AttributeError):
            s.server_side = True
        with pytest.raises(AttributeError):
            s.server_hostname = "asdf"

        # but .context is *not*. Check that we forward attribute setting by
        # making sure that after we set the bad context our handshake indeed
        # fails:
        s.context = bad_ctx
        assert s.context is bad_ctx
        with pytest.raises(BrokenResourceError) as excinfo:
            await s.do_handshake()
        assert isinstance(excinfo.value.__cause__, ssl.SSLError)
# Note: this test fails horribly if we force TLS 1.2 and trigger a
# renegotiation at the beginning (e.g. by switching to the pyopenssl
# server). Usually the client crashes in SSLObject.write with "UNEXPECTED
# RECORD"; sometimes we get something more exotic like a SyscallError. This is
# odd because openssl isn't doing any syscalls, but so it goes. After lots of
# websearching I'm pretty sure this is due to a bug in OpenSSL, where it just
# can't reliably handle full-duplex communication combined with
# renegotiation. Nice, eh?
#
# https://rt.openssl.org/Ticket/Display.html?id=3712
# https://rt.openssl.org/Ticket/Display.html?id=2481
# http://openssl.6102.n7.nabble.com/TLS-renegotiation-failure-on-receiving-application-data-during-handshake-td48127.html
# https://stackoverflow.com/questions/18728355/ssl-renegotiation-with-full-duplex-socket-communication
#
# In some variants of this test (maybe only against the java server?) I've
# also seen cases where our send_all blocks waiting to write, and then our receive_some
# also blocks waiting to write, and they never wake up again. It looks like
# some kind of deadlock. I suspect there may be an issue where we've filled up
# the send buffers, and the remote side is trying to handle the renegotiation
# from inside a write() call, so it has a problem: there's all this application
# data clogging up the pipe, but it can't process and return it to the
# application because it's in write(), and it doesn't want to buffer infinite
# amounts of data, and... actually I guess those are the only two choices.
#
# NSS even documents that you shouldn't try to do a renegotiation except when
# the connection is idle:
#
# https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/SSL_functions/sslfnc.html#1061582
#
# I begin to see why HTTP/2 forbids renegotiation and TLS 1.3 removes it...
async def test_full_duplex_basics(client_ctx):
    """Full-duplex echo: one task streams ~1 MB out while another streams it
    back in, with a couple of concurrent (redundant) do_handshake calls
    thrown in for good measure."""
    CHUNKS = 30
    CHUNK_SIZE = 32768
    EXPECTED = CHUNKS * CHUNK_SIZE

    sent = bytearray()
    received = bytearray()

    async def sender(s):
        nonlocal sent
        for i in range(CHUNKS):
            print(i)
            chunk = bytes([i] * CHUNK_SIZE)
            sent += chunk
            await s.send_all(chunk)

    async def receiver(s):
        nonlocal received
        while len(received) < EXPECTED:
            chunk = await s.receive_some(CHUNK_SIZE // 2)
            received += chunk

    async with ssl_echo_server(client_ctx) as s:
        async with _core.open_nursery() as nursery:
            nursery.start_soon(sender, s)
            nursery.start_soon(receiver, s)
            # And let's have some doing handshakes too, everyone
            # simultaneously
            nursery.start_soon(s.do_handshake)
            nursery.start_soon(s.do_handshake)

        await s.aclose()

    assert len(sent) == len(received) == EXPECTED
    assert sent == received
async def test_renegotiation_simple(client_ctx):
    """A single renegotiation against the virtual PyOpenSSL echo server must
    be transparent to SSLStream's send/receive."""
    with virtual_ssl_echo_server(client_ctx) as s:
        await s.do_handshake()

        s.transport_stream.renegotiate()
        await s.send_all(b"a")
        assert await s.receive_some(1) == b"a"

        # Have to send some more data back and forth to make sure the
        # renegotiation is finished before shutting down the
        # connection... otherwise openssl raises an error. I think this is a
        # bug in openssl but what can ya do.
        await s.send_all(b"b")
        assert await s.receive_some(1) == b"b"

        await s.aclose()
@slow
async def test_renegotiation_randomized(mock_clock, client_ctx):
    """Stress renegotiation with randomized (virtual) delays injected at
    every interesting point, in many send/expect orderings, then pin down two
    specific interleavings of wait_send_all_might_not_block vs receive_some."""
    # The only blocking things in this function are our random sleeps, so 0 is
    # a good threshold.
    mock_clock.autojump_threshold = 0

    import random

    # Seeded RNG so the "random" delays are reproducible across runs.
    r = random.Random(0)

    async def sleeper(_):
        await trio.sleep(r.uniform(0, 10))

    async def clear():
        # Ping-pong filler bytes until the pending renegotiation completes.
        while s.transport_stream.renegotiate_pending():
            with assert_checkpoints():
                await send(b"-")
            with assert_checkpoints():
                await expect(b"-")
        print("-- clear --")

    async def send(byte):
        await s.transport_stream.sleeper("outer send")
        print("calling SSLStream.send_all", byte)
        with assert_checkpoints():
            await s.send_all(byte)

    async def expect(expected):
        await s.transport_stream.sleeper("expect")
        print("calling SSLStream.receive_some, expecting", expected)
        assert len(expected) == 1
        with assert_checkpoints():
            assert await s.receive_some(1) == expected

    with virtual_ssl_echo_server(client_ctx, sleeper=sleeper) as s:
        await s.do_handshake()

        await send(b"a")
        s.transport_stream.renegotiate()
        await expect(b"a")

        await clear()

        for i in range(100):
            b1 = bytes([i % 0xFF])
            b2 = bytes([(2 * i) % 0xFF])
            s.transport_stream.renegotiate()
            async with _core.open_nursery() as nursery:
                nursery.start_soon(send, b1)
                nursery.start_soon(expect, b1)
            async with _core.open_nursery() as nursery:
                nursery.start_soon(expect, b2)
                nursery.start_soon(send, b2)
            await clear()

        for i in range(100):
            b1 = bytes([i % 0xFF])
            b2 = bytes([(2 * i) % 0xFF])
            await send(b1)
            s.transport_stream.renegotiate()
            await expect(b1)
            async with _core.open_nursery() as nursery:
                nursery.start_soon(expect, b2)
                nursery.start_soon(send, b2)
            await clear()

    # Checking that wait_send_all_might_not_block and receive_some don't
    # conflict:

    # 1) Set up a situation where expect (receive_some) is blocked sending,
    # and wait_send_all_might_not_block comes in.

    # Our receive_some() call will get stuck when it hits send_all
    async def sleeper_with_slow_send_all(method):
        if method == "send_all":
            await trio.sleep(100000)

    # And our wait_send_all_might_not_block call will give it time to get
    # stuck, and then start
    async def sleep_then_wait_writable():
        await trio.sleep(1000)
        await s.wait_send_all_might_not_block()

    with virtual_ssl_echo_server(client_ctx, sleeper=sleeper_with_slow_send_all) as s:
        await send(b"x")
        s.transport_stream.renegotiate()
        async with _core.open_nursery() as nursery:
            nursery.start_soon(expect, b"x")
            nursery.start_soon(sleep_then_wait_writable)

        await clear()

        await s.aclose()

    # 2) Same, but now wait_send_all_might_not_block is stuck when
    # receive_some tries to send.
    async def sleeper_with_slow_wait_writable_and_expect(method):
        if method == "wait_send_all_might_not_block":
            await trio.sleep(100000)
        elif method == "expect":
            await trio.sleep(1000)

    with virtual_ssl_echo_server(
        client_ctx, sleeper=sleeper_with_slow_wait_writable_and_expect
    ) as s:
        await send(b"x")
        s.transport_stream.renegotiate()
        async with _core.open_nursery() as nursery:
            nursery.start_soon(expect, b"x")
            nursery.start_soon(s.wait_send_all_might_not_block)

        await clear()

        await s.aclose()
async def test_resource_busy_errors(client_ctx):
    """SSLStream itself must raise BusyResourceError when two tasks use
    conflicting operations (send/send, receive/receive, send/wait-writable,
    wait-writable/wait-writable) at the same time."""

    async def do_send_all():
        with assert_checkpoints():
            await s.send_all(b"x")

    async def do_receive_some():
        with assert_checkpoints():
            await s.receive_some(1)

    async def do_wait_send_all_might_not_block():
        with assert_checkpoints():
            await s.wait_send_all_might_not_block()

    # Same scenarios, in the same order, as the conflict matrix we care about.
    # The closures read `s` late, so rebinding it each iteration is fine.
    for task1, task2 in [
        (do_send_all, do_send_all),
        (do_receive_some, do_receive_some),
        (do_send_all, do_wait_send_all_might_not_block),
        (do_wait_send_all_might_not_block, do_wait_send_all_might_not_block),
    ]:
        s, _ = ssl_lockstep_stream_pair(client_ctx)
        with pytest.raises(_core.BusyResourceError) as excinfo:
            async with _core.open_nursery() as nursery:
                nursery.start_soon(task1)
                nursery.start_soon(task2)
        assert "another task" in str(excinfo.value)
async def test_wait_writable_calls_underlying_wait_writable():
    """SSLStream.wait_send_all_might_not_block must delegate to the wrapped
    transport's method."""
    calls = []

    class JustWaitWritable:
        async def wait_send_all_might_not_block(self):
            calls.append("ok")

    ssl_ctx = ssl.create_default_context()
    stream = SSLStream(JustWaitWritable(), ssl_ctx, server_hostname="x")
    await stream.wait_send_all_might_not_block()
    assert calls == ["ok"]
async def test_checkpoints(client_ctx):
    """Every public async operation on SSLStream must execute a trio
    checkpoint, even when it could complete synchronously."""
    async with ssl_echo_server(client_ctx) as s:
        with assert_checkpoints():
            await s.do_handshake()
        with assert_checkpoints():
            await s.do_handshake()
        with assert_checkpoints():
            await s.wait_send_all_might_not_block()
        with assert_checkpoints():
            await s.send_all(b"xxx")
        with assert_checkpoints():
            await s.receive_some(1)
        # These receive_some's in theory could return immediately, because the
        # "xxx" was sent in a single record and after the first
        # receive_some(1) the rest are sitting inside the SSLObject's internal
        # buffers.
        with assert_checkpoints():
            await s.receive_some(1)
        with assert_checkpoints():
            await s.receive_some(1)
        with assert_checkpoints():
            await s.unwrap()

    async with ssl_echo_server(client_ctx) as s:
        await s.do_handshake()
        with assert_checkpoints():
            await s.aclose()
async def test_send_all_empty_string(client_ctx):
    """send_all(b"") must be a harmless no-op checkpoint, not an EOF."""
    async with ssl_echo_server(client_ctx) as stream:
        await stream.do_handshake()

        # The underlying SSLObject interprets writing b"" as signalling EOF,
        # for some reason. Make sure SSLStream doesn't inherit that quirk.
        with assert_checkpoints():
            await stream.send_all(b"")
        with assert_checkpoints():
            await stream.send_all(b"")

        # ...and the connection still works afterwards.
        await stream.send_all(b"x")
        assert await stream.receive_some(1) == b"x"

        await stream.aclose()
@pytest.mark.parametrize("https_compatible", [False, True])
async def test_SSLStream_generic(client_ctx, https_compatible):
    """Run the generic two-way stream conformance suite over SSLStream, in
    both https_compatible modes."""

    async def stream_maker():
        return ssl_memory_stream_pair(
            client_ctx,
            client_kwargs={"https_compatible": https_compatible},
            server_kwargs={"https_compatible": https_compatible},
        )

    async def clogged_stream_maker():
        client, server = ssl_lockstep_stream_pair(client_ctx)
        # If we don't do handshakes up front, then we run into a problem in
        # the following situation:
        # - server does wait_send_all_might_not_block
        # - client does receive_some to unclog it
        # Then the client's receive_some will actually send some data to start
        # the handshake, and itself get stuck.
        async with _core.open_nursery() as nursery:
            nursery.start_soon(client.do_handshake)
            nursery.start_soon(server.do_handshake)
        return client, server

    await check_two_way_stream(stream_maker, clogged_stream_maker)
async def test_unwrap(client_ctx):
    """unwrap() returns the raw transport plus any trailing (post-TLS) bytes;
    a Sequencer pins the exact ordering so the trailing data arrives fused
    with the TLS shutdown acknowledgement."""
    client_ssl, server_ssl = ssl_memory_stream_pair(client_ctx)
    client_transport = client_ssl.transport_stream
    server_transport = server_ssl.transport_stream

    seq = Sequencer()

    async def client():
        await client_ssl.do_handshake()
        await client_ssl.send_all(b"x")
        assert await client_ssl.receive_some(1) == b"y"
        await client_ssl.send_all(b"z")

        # After sending that, disable outgoing data from our end, to make
        # sure the server doesn't see our EOF until after we've sent some
        # trailing data
        async with seq(0):
            send_all_hook = client_transport.send_stream.send_all_hook
            client_transport.send_stream.send_all_hook = None

        assert await client_ssl.receive_some(1) == b""
        assert client_ssl.transport_stream is client_transport
        # We just received EOF. Unwrap the connection and send some more.
        raw, trailing = await client_ssl.unwrap()
        assert raw is client_transport
        assert trailing == b""
        assert client_ssl.transport_stream is None
        await raw.send_all(b"trailing")

        # Reconnect the streams. Now the server will receive both our shutdown
        # acknowledgement + the trailing data in a single lump.
        client_transport.send_stream.send_all_hook = send_all_hook
        await client_transport.send_stream.send_all_hook()

    async def server():
        await server_ssl.do_handshake()
        assert await server_ssl.receive_some(1) == b"x"
        await server_ssl.send_all(b"y")
        assert await server_ssl.receive_some(1) == b"z"
        # Now client is blocked waiting for us to send something, but
        # instead we close the TLS connection (with sequencer to make sure
        # that the client won't see and automatically respond before we've had
        # a chance to disable the client->server transport)
        async with seq(1):
            raw, trailing = await server_ssl.unwrap()
        assert raw is server_transport
        assert trailing == b"trailing"
        assert server_ssl.transport_stream is None

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client)
        nursery.start_soon(server)
async def test_closing_nice_case(client_ctx):
    """Graceful close handshake in both directions, plus the semantics of an
    SSLStream after it has been closed locally (idempotent aclose, errors on
    further use, clean EOF when closing before the handshake)."""
    # the nice case: graceful closes all around
    client_ssl, server_ssl = ssl_memory_stream_pair(client_ctx)
    client_transport = client_ssl.transport_stream

    # Both the handshake and the close require back-and-forth discussion, so
    # we need to run them concurrently
    async def client_closer():
        with assert_checkpoints():
            await client_ssl.aclose()

    async def server_closer():
        assert await server_ssl.receive_some(10) == b""
        assert await server_ssl.receive_some(10) == b""
        with assert_checkpoints():
            await server_ssl.aclose()

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client_closer)
        nursery.start_soon(server_closer)

    # closing the SSLStream also closes its transport
    with pytest.raises(ClosedResourceError):
        await client_transport.send_all(b"123")

    # once closed, it's OK to close again
    with assert_checkpoints():
        await client_ssl.aclose()
    with assert_checkpoints():
        await client_ssl.aclose()

    # Trying to send more data does not work
    with pytest.raises(ClosedResourceError):
        await server_ssl.send_all(b"123")

    # And once the connection has been closed *locally*, then instead of
    # getting empty bytestrings we get a proper error
    with pytest.raises(ClosedResourceError):
        # Fix: this used to be `await client_ssl.receive_some(10) == b""`; the
        # `== b""` was dead code (receive_some raises before the comparison
        # runs, and the comparison result was discarded anyway).
        await client_ssl.receive_some(10)

    with pytest.raises(ClosedResourceError):
        await client_ssl.unwrap()

    with pytest.raises(ClosedResourceError):
        await client_ssl.do_handshake()

    # Check that a graceful close *before* handshaking gives a clean EOF on
    # the other side
    client_ssl, server_ssl = ssl_memory_stream_pair(client_ctx)

    async def expect_eof_server():
        with assert_checkpoints():
            assert await server_ssl.receive_some(10) == b""
        with assert_checkpoints():
            await server_ssl.aclose()

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client_ssl.aclose)
        nursery.start_soon(expect_eof_server)
async def test_send_all_fails_in_the_middle(client_ctx):
    """If the transport's send_all raises partway through, the error
    propagates, the stream is broken for further sends, and aclose() still
    closes both halves of the transport exactly once each."""
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    async def bad_hook():
        raise KeyError

    client.transport_stream.send_stream.send_all_hook = bad_hook

    with pytest.raises(KeyError):
        await client.send_all(b"x")

    with pytest.raises(BrokenResourceError):
        await client.wait_send_all_might_not_block()

    closed = 0

    def close_hook():
        nonlocal closed
        closed += 1

    client.transport_stream.send_stream.close_hook = close_hook
    client.transport_stream.receive_stream.close_hook = close_hook
    await client.aclose()
    # Both the send half and the receive half got closed.
    assert closed == 2
async def test_ssl_over_ssl(client_ctx):
    """TLS tunnelled through TLS: stack two SSLStreams on each side of a
    memory-stream pair and echo a message through the double wrapping."""
    raw_client, raw_server = memory_stream_pair()
    inner_client = SSLStream(
        raw_client, client_ctx, server_hostname="trio-test-1.example.org"
    )
    inner_server = SSLStream(raw_server, SERVER_CTX, server_side=True)
    outer_client = SSLStream(
        inner_client, client_ctx, server_hostname="trio-test-1.example.org"
    )
    outer_server = SSLStream(inner_server, SERVER_CTX, server_side=True)

    async def client_task():
        await outer_client.send_all(b"hi")
        assert await outer_client.receive_some(10) == b"bye"

    async def server_task():
        assert await outer_server.receive_some(10) == b"hi"
        await outer_server.send_all(b"bye")

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client_task)
        nursery.start_soon(server_task)
async def test_ssl_bad_shutdown(client_ctx):
    """An abrupt (forceful) close shows up as BrokenResourceError on the
    peer in the default (non-https-compatible) mode."""
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    await trio.aclose_forcefully(client)
    # now the server sees a broken stream
    with pytest.raises(BrokenResourceError):
        await server.receive_some(10)
    with pytest.raises(BrokenResourceError):
        await server.send_all(b"x" * 10)

    await server.aclose()
async def test_ssl_bad_shutdown_but_its_ok(client_ctx):
    """With https_compatible=True, an abrupt close is tolerated on receive
    (reported as a clean EOF), though sending still fails."""
    client, server = ssl_memory_stream_pair(
        client_ctx,
        server_kwargs={"https_compatible": True},
        client_kwargs={"https_compatible": True},
    )

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    await trio.aclose_forcefully(client)
    # the server sees that as a clean shutdown
    assert await server.receive_some(10) == b""
    with pytest.raises(BrokenResourceError):
        await server.send_all(b"x" * 10)

    await server.aclose()
async def test_ssl_handshake_failure_during_aclose():
    # Weird scenario: aclose() triggers an automatic handshake, and this
    # fails. This also exercises a bit of code in aclose() that was otherwise
    # uncovered, for re-raising exceptions after calling aclose_forcefully on
    # the underlying transport.
    async with ssl_echo_server_raw(expect_fail=True) as sock:
        # Don't configure trust correctly
        client_ctx = ssl.create_default_context()
        s = SSLStream(sock, client_ctx, server_hostname="trio-test-1.example.org")
        # It's a little unclear here whether aclose should swallow the error
        # or let it escape. We *do* swallow the error if it arrives when we're
        # sending close_notify, because both sides closing the connection
        # simultaneously is allowed. But I guess when https_compatible=False
        # then it's bad if we can get through a whole connection with a peer
        # that has no valid certificate, and never raise an error.
        with pytest.raises(BrokenResourceError):
            await s.aclose()
async def test_ssl_only_closes_stream_once(client_ctx):
    # We used to have a bug where if transport_stream.aclose() raised an
    # error, we would call it again. This checks that that's fixed.
    client, server = ssl_memory_stream_pair(client_ctx)
    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    client_orig_close_hook = client.transport_stream.send_stream.close_hook
    transport_close_count = 0

    def close_hook():
        # Count closes, then raise so aclose() has an error to (not) retry on.
        nonlocal transport_close_count
        client_orig_close_hook()
        transport_close_count += 1
        raise KeyError

    client.transport_stream.send_stream.close_hook = close_hook

    with pytest.raises(KeyError):
        await client.aclose()
    # The transport must have been closed exactly once despite the error.
    assert transport_close_count == 1
async def test_ssl_https_compatibility_disagreement(client_ctx):
    """Client in https_compatible mode, server not: the client's lenient
    (close_notify-less) shutdown is an error from the strict server's side."""
    client, server = ssl_memory_stream_pair(
        client_ctx,
        server_kwargs={"https_compatible": False},
        client_kwargs={"https_compatible": True},
    )

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    # client is in HTTPS-mode, server is not
    # so client doing graceful_shutdown causes an error on server
    async def receive_and_expect_error():
        with pytest.raises(BrokenResourceError) as excinfo:
            await server.receive_some(10)

        assert isinstance(excinfo.value.__cause__, ssl.SSLEOFError)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.aclose)
        nursery.start_soon(receive_and_expect_error)
async def test_https_mode_eof_before_handshake(client_ctx):
    """In https_compatible mode, an EOF arriving before the handshake is
    reported as a clean b"" rather than an error."""
    client, server = ssl_memory_stream_pair(
        client_ctx,
        server_kwargs={"https_compatible": True},
        client_kwargs={"https_compatible": True},
    )

    async def expect_clean_eof():
        assert await server.receive_some(10) == b""

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.aclose)
        nursery.start_soon(expect_clean_eof)
async def test_send_error_during_handshake(client_ctx):
    """A send failure mid-handshake propagates, then breaks the stream.

    The first do_handshake() surfaces the hook's KeyError; afterwards the
    stream is unusable, so a retry raises BrokenResourceError.
    """
    client, server = ssl_memory_stream_pair(client_ctx)

    async def bad_hook():
        raise KeyError

    client.transport_stream.send_stream.send_all_hook = bad_hook

    with pytest.raises(KeyError):
        with assert_checkpoints():
            await client.do_handshake()

    with pytest.raises(BrokenResourceError):
        with assert_checkpoints():
            await client.do_handshake()
async def test_receive_error_during_handshake(client_ctx):
    """A receive failure mid-handshake propagates, then breaks the stream.

    Mirrors test_send_error_during_handshake, but on the receive path; the
    server's handshake is cancelled once the client side has failed.
    """
    client, server = ssl_memory_stream_pair(client_ctx)

    async def bad_hook():
        raise KeyError

    client.transport_stream.receive_stream.receive_some_hook = bad_hook

    async def client_side(cancel_scope):
        with pytest.raises(KeyError):
            with assert_checkpoints():
                await client.do_handshake()
        # Client failed; stop the server's pending do_handshake too.
        cancel_scope.cancel()

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client_side, nursery.cancel_scope)
        nursery.start_soon(server.do_handshake)

    # After the failure the stream is permanently broken.
    with pytest.raises(BrokenResourceError):
        with assert_checkpoints():
            await client.do_handshake()
async def test_selected_alpn_protocol_before_handshake(client_ctx):
    """Querying the ALPN protocol before any handshake raises NeedHandshakeError."""
    stream_pair = ssl_memory_stream_pair(client_ctx)
    # Same check on both endpoints, client first.
    for endpoint in stream_pair:
        with pytest.raises(NeedHandshakeError):
            endpoint.selected_alpn_protocol()
async def test_selected_alpn_protocol_when_not_set(client_ctx):
    # ALPN protocol still returns None when it's not set,
    # instead of raising an exception
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    # Neither side negotiated ALPN, so both report None — and agree.
    assert client.selected_alpn_protocol() is None
    assert server.selected_alpn_protocol() is None

    assert client.selected_alpn_protocol() == server.selected_alpn_protocol()
async def test_selected_npn_protocol_before_handshake(client_ctx):
    """Querying the NPN protocol before any handshake raises NeedHandshakeError."""
    stream_pair = ssl_memory_stream_pair(client_ctx)
    # Same check on both endpoints, client first.
    for endpoint in stream_pair:
        with pytest.raises(NeedHandshakeError):
            endpoint.selected_npn_protocol()
async def test_selected_npn_protocol_when_not_set(client_ctx):
    # NPN protocol still returns None when it's not set,
    # instead of raising an exception
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    # Neither side negotiated NPN, so both report None — and agree.
    assert client.selected_npn_protocol() is None
    assert server.selected_npn_protocol() is None

    assert client.selected_npn_protocol() == server.selected_npn_protocol()
async def test_get_channel_binding_before_handshake(client_ctx):
    """Channel binding is unavailable before the handshake completes."""
    stream_pair = ssl_memory_stream_pair(client_ctx)
    # Same check on both endpoints, client first.
    for endpoint in stream_pair:
        with pytest.raises(NeedHandshakeError):
            endpoint.get_channel_binding()
async def test_get_channel_binding_after_handshake(client_ctx):
    """After the handshake, both sides expose the same non-None channel binding."""
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    assert client.get_channel_binding() is not None
    assert server.get_channel_binding() is not None

    # Channel binding data is derived from the shared TLS session, so it
    # must match on both endpoints.
    assert client.get_channel_binding() == server.get_channel_binding()
async def test_getpeercert(client_ctx):
    """Check getpeercert() on both sides after a completed handshake.

    Make sure we're not affected by https://bugs.python.org/issue29334.
    The server requested no client certificate, so it sees None; the client
    must see the test server's certificate with the expected SAN entry.

    (Fix: removed a leftover debug ``print`` of the client's peer cert.)
    """
    client, server = ssl_memory_stream_pair(client_ctx)

    async with _core.open_nursery() as nursery:
        nursery.start_soon(client.do_handshake)
        nursery.start_soon(server.do_handshake)

    assert server.getpeercert() is None
    assert ("DNS", "trio-test-1.example.org") in client.getpeercert()["subjectAltName"]
async def test_SSLListener(client_ctx):
    """Exercise SSLListener: accept, handshake, aclose, and https_compatible."""

    async def setup(**kwargs):
        # Build a real localhost TCP listener wrapped in SSLListener, plus a
        # connected SSL client; **kwargs are forwarded to SSLListener.
        listen_sock = tsocket.socket()
        await listen_sock.bind(("127.0.0.1", 0))
        listen_sock.listen(1)
        socket_listener = SocketListener(listen_sock)
        ssl_listener = SSLListener(socket_listener, SERVER_CTX, **kwargs)

        transport_client = await open_tcp_stream(*listen_sock.getsockname())
        ssl_client = SSLStream(
            transport_client, client_ctx, server_hostname="trio-test-1.example.org"
        )
        return listen_sock, ssl_listener, ssl_client

    listen_sock, ssl_listener, ssl_client = await setup()

    async with ssl_client:
        ssl_server = await ssl_listener.accept()

        async with ssl_server:
            # Default is strict (non-HTTPS-compatible) mode.
            assert not ssl_server._https_compatible

            # Make sure the connection works
            async with _core.open_nursery() as nursery:
                nursery.start_soon(ssl_client.do_handshake)
                nursery.start_soon(ssl_server.do_handshake)

        # Test SSLListener.aclose — closing the listener closes its socket.
        await ssl_listener.aclose()
        assert listen_sock.fileno() == -1

    ################

    # Test https_compatible — the flag is forwarded to accepted streams.
    _, ssl_listener, ssl_client = await setup(https_compatible=True)

    ssl_server = await ssl_listener.accept()

    assert ssl_server._https_compatible

    await aclose_forcefully(ssl_listener)
    await aclose_forcefully(ssl_client)
    await aclose_forcefully(ssl_server)
async def test_deprecated_max_refill_bytes(client_ctx):
    """Passing max_refill_bytes= emits TrioDeprecationWarning on both classes."""
    stream1, stream2 = memory_stream_pair()
    with pytest.warns(trio.TrioDeprecationWarning):
        SSLStream(stream1, client_ctx, max_refill_bytes=100)
    with pytest.warns(trio.TrioDeprecationWarning):
        # passing None is wrong here, but I'm too lazy to make a fake Listener
        # and we get away with it for now. And this test will be deleted in a
        # release or two anyway, so hopefully we'll keep getting away with it
        # for long enough.
        SSLListener(None, client_ctx, max_refill_bytes=100)
|
test_auto_scheduler_measure.py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Test measurement and log serialization. """
import json
import multiprocessing
import numpy as np
import tvm
from tvm import topi
from tvm import te, auto_scheduler
import tempfile
import tvm.testing
import pickle
from test_auto_scheduler_common import matmul_auto_scheduler_test
from tvm.auto_scheduler import workload_registry
def record_common(dag, s):
    """Round-trip a (MeasureInput, MeasureResult) record for state ``s`` of ``dag``.

    Serializes the record both to an in-memory string and to a temp file,
    reads it back, and asserts the recovered input/result match the originals.
    """
    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key="test", target=target)

    inp = auto_scheduler.measure.MeasureInput(task, s)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)

    # Test in-memory record processing.
    record_str = auto_scheduler.measure_record.dump_record_to_string(inp, res)
    r_inp, r_res = auto_scheduler.measure_record.load_record_from_string(record_str)
    # Only check the workload_key for simplification.
    assert inp.task.workload_key == r_inp.task.workload_key
    assert str(res) == str(r_res)

    # Test file-based record processing.
    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])
        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1

        # The loaded state must infer the same bounds as the original state,
        # and must differ from the untouched initial state.
        s1 = dag.infer_bound_from_state(s)
        s2 = dag.infer_bound_from_state(inputs[0].state)
        assert s1 == s2
        assert not (s1 == dag.get_init_state())
def test_record_split_reorder_fuse_annotation():
    """Round-trip a schedule record using split/reorder/fuse/annotation steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    # 512x512 matmul as the workload for all record tests below.
    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Split
    its0 = s.split(C, s[C].iters[0], [4, 8, 8])
    its1 = s.split(C, s[C].iters[4], [8, 4, 4])
    # Reorder
    s.reorder(
        C, [its0[0], its1[0], its0[1], its1[1], its0[2], its1[2], its0[3], s[C].iters[8], its1[3]]
    )
    # Fuse
    s.fuse(C, [s[C].iters[0], s[C].iters[1], s[C].iters[2]])
    # Parallel
    s.parallel(C, s[C].iters[0])
    # Thread bind(The blockIdx & threadIdx are used in GPU, just for record testing here)
    s.bind(C, s[C].iters[1], "blockIdx.x")
    s.bind(C, s[C].iters[2], "threadIdx.z")
    s.bind(C, s[C].iters[3], "vthread")
    # Unroll
    s.unroll(C, s[C].iters[4])
    # Vectorize
    s.vectorize(C, s[C].iters[6])

    record_common(dag, s)
def test_record_compute_at_root_inline_cache_read_write():
    """Round-trip a record with compute_at/root/inline and cache read/write steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    # relu(A) feeding a matmul, so there is a stage to inline.
    A = te.placeholder((512, 512), name="A")
    AA = topi.nn.relu(A)
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(AA[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Cache Write
    C_shared = s.cache_write(C, "shared")
    # Compute At
    s.compute_at(C_shared, C, s[C].iters[0])
    # Cache Read
    B_global = s.cache_read(B, "global", [C_shared])
    s.compute_at(B_global, C_shared, s[C_shared].iters[2])
    # Compute Inline
    s.compute_inline(AA)
    # Compute Root
    s.compute_root(C_shared)

    record_common(dag, s)
def test_record_follow_split_follow_fused_split():
    """Round-trip a record using follow_split / follow_fused_split steps.

    Fix: removed the dead locals ``its0``/``its1`` and the loop that
    interleaved them into an ``its`` list that was never used — only the
    ``s.split(...)`` side effects on the state matter.
    """
    if not tvm.testing.device_enabled("llvm"):
        return

    # matmul -> relu -> relu, so follow_fused_split can target stage D.
    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
    D = topi.nn.relu(C)
    E = topi.nn.relu(D)

    dag = auto_scheduler.ComputeDAG([A, B, E])
    s = dag.get_init_state()

    # Follow Split
    s.split(C, s[C].iters[0], [4, 2, 8, 4], True)
    split_step0 = len(s.transform_steps) - 1
    s.follow_split(C, s[C].iters[5], split_step0, 4)

    # Follow Fused Split: perform two splits on E, fuse adjacent iterator
    # pairs, then split D following the two recorded split steps.
    s.split(E, s[E].iters[0], [4, 2, 8, 4], True)
    split_step1 = len(s.transform_steps) - 1
    s.split(E, s[E].iters[5], [2, 4, 2, 4], True)
    split_step2 = len(s.transform_steps) - 1
    for i in range(0, 5):
        s.fuse(E, [s[E].iters[i], s[E].iters[i + 1]])
    s.follow_fused_split(D, s[D].iters[0], [split_step1, split_step2], 2, True)

    record_common(dag, s)
def test_record_pragma_storage_align_rfactor():
    """Round-trip a record using pragma, storage_align, and rfactor steps."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")

    dag = auto_scheduler.ComputeDAG([A, B, C])
    s = dag.get_init_state()

    # Rfactor
    ko, _ = s.split(C, s[C].iters[2], [16])
    s.rfactor(C, ko, 2)
    # Pragma
    s.pragma(C, s[C].iters[0], "auto_unroll_max_step$64")
    # StorageAlign
    s.storage_align(C, s[C].iters[-1], 8, 4)

    record_common(dag, s)
def test_recover_measure_input():
    """recover_measure_input rebuilds the ComputeDAG (and optionally the state)."""
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )

    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)

    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])

        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1

        raw_inp = inputs[0]

        # Default recovery restores the task's compute DAG.
        correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
        assert str(correct_inp.task.compute_dag) == str(inp.task.compute_dag)

        # rebuild_state=True additionally reconstructs the schedule state.
        correct_inp = auto_scheduler.measure.recover_measure_input(raw_inp, rebuild_state=True)
        assert str(correct_inp.state) == str(inp.state)
def test_workload_dis_factor():
    """calc_workload_dis_factor: 1 for identical, a ratio for compatible shapes,
    inf for anything incompatible."""
    calc = auto_scheduler.utils.calc_workload_dis_factor
    decode = auto_scheduler.utils.decode_workload_key

    # Identical
    target_wkl_key = json.dumps(
        ["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"]
    )
    assert calc(decode(target_wkl_key), decode(target_wkl_key)) == 1

    # Compatible with a factor (8/1 * 224/112 * 224/112)
    wkl_key = json.dumps(["func1", [1, 3, 112, 112], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == 8 * 2 * 2

    # Incompatible argument with zeros
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [1, 1], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [0, 0], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible non-integer argument (dtype differs)
    wkl_key = json.dumps(["func1", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "int8"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible function
    wkl_key = json.dumps(["func2", [8, 3, 224, 224], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")

    # Incompatible due to non-dividable factor
    wkl_key = json.dumps(["func1", [8, 3, 223, 223], [32, 3, 3, 3], [0, 0], [1, 1], "float32"])
    assert calc(decode(target_wkl_key), decode(wkl_key)) == float("inf")
def test_measure_local_builder_runner():
    """Build and run a matmul measurement with LocalBuilder + LocalRunner."""
    if not tvm.testing.device_enabled("llvm"):
        return

    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )

    # Cover both cache-flush settings of the local runner.
    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        local_runner = auto_scheduler.LocalRunner(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )

        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = local_runner.run([minp], bress)
        assert mress[0].error_no == 0
def test_dag_measure_local_builder_runner():
    """Measure a DAG-registered workload, exercising workload-registry
    (de)serialization across a pickle round-trip along the way."""
    if not tvm.testing.device_enabled("llvm"):
        return

    A = te.placeholder((512, 512), name="A")
    B = te.placeholder((512, 512), name="B")
    k = te.reduce_axis((0, 512), name="k")
    C = te.compute((512, 512), lambda i, j: te.sum(A[i][k] * B[k][j], axis=[k]), name="C")
    D = topi.nn.relu(C)
    E = topi.nn.relu(D)

    tensors = [A, B, E]
    dag = auto_scheduler.ComputeDAG(tensors)
    key = workload_registry.register_workload_tensors(dag.workload_key(), tensors)

    # Simulate shipping the registry entry to another process: serialize,
    # pickle round-trip, delete the local entry, then restore it.
    transfer_data = workload_registry.serialize_workload_registry_entry(key)
    f_data = pickle.dumps(transfer_data)
    f_new = pickle.loads(f_data)
    del workload_registry.WORKLOAD_FUNC_REGISTRY[key]
    workload_registry.deserialize_workload_registry_entry(f_new)

    target = tvm.target.Target("llvm")
    task = auto_scheduler.SearchTask(compute_dag=dag, workload_key=key, target=target)

    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        local_runner = auto_scheduler.LocalRunner(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )

        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = local_runner.run([minp], bress)
        assert mress[0].error_no == 0
def test_measure_local_builder_rpc_runner():
    """Build locally, then run measurements through a local RPC runner."""
    if not tvm.testing.device_enabled("llvm"):
        return

    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )

    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )
        rpc_runner = measure_ctx.runner

        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = rpc_runner.run([minp], bress)
        assert mress[0].error_no == 0

        # Explicitly tear down the RPC measure context (server/tracker).
        del measure_ctx
def measure_local_builder_rpc_runner_spawn():
    """Child-process entry point: verify we are under 'spawn', then run the
    RPC-runner test inside this process."""
    assert multiprocessing.get_start_method(False) == "spawn"
    test_measure_local_builder_rpc_runner()
@tvm.testing.requires_llvm
def test_measure_local_builder_rpc_runner_spawn():
    """Run the RPC-runner test in a subprocess using the 'spawn' start method."""
    ctx = multiprocessing.get_context("spawn")
    p = ctx.Process(target=measure_local_builder_rpc_runner_spawn)
    p.start()
    p.join()
@tvm.testing.requires_llvm
def test_measure_target_host():
    """The target host survives a save/load/recover round-trip of a record."""
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        # Cross-compilation style target: llvm device with an aarch64 host.
        target=tvm.target.Target("llvm", "llvm -mtriple=aarch64-linux-gnu"),
    )

    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)

    with tempfile.NamedTemporaryFile() as fp:
        auto_scheduler.save_records(fp.name, [inp], [res])

        log_reader = auto_scheduler.RecordReader(fp.name)
        inputs, _ = log_reader.read_lines()
        assert len(inputs) == 1

        raw_inp = inputs[0]

        recovered_inp = auto_scheduler.measure.recover_measure_input(raw_inp)
        assert str(recovered_inp.task.target.host) == str(inp.task.target.host)
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_local_runner():
    """Measure a workload whose 'Index' input must be supplied by name
    (random data would be an out-of-range index), via LocalRunner."""

    @auto_scheduler.register_workload
    def foo():
        X = te.placeholder(shape=[10], dtype="int32")
        Index = te.placeholder(shape=[1], dtype="int32", name="Index")
        Y = te.compute((1,), lambda i: X[Index[i]])
        return [X, Index, Y]

    # This workload cannot use random input for the `Index` input
    task = auto_scheduler.SearchTask(
        func=foo,
        target="llvm",
        task_inputs={
            "Index": tvm.nd.array(np.array([5], dtype="int32")),
        },
    )

    minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
    local_builder = auto_scheduler.LocalBuilder()
    local_runner = auto_scheduler.LocalRunner(timeout=10)

    bress = local_builder.build([minp])
    assert bress[0].error_no == 0
    mress = local_runner.run([minp], bress)
    assert mress[0].error_no == 0
@tvm.testing.requires_llvm
def test_measure_special_inputs_map_by_name_rpc_runner():
    """Same named-input workload as the local-runner variant, but measured
    through a local RPC runner and with both cache-flush settings."""

    @auto_scheduler.register_workload
    def foo():
        X = te.placeholder(shape=[10], dtype="int32")
        Index = te.placeholder(shape=[1], dtype="int32", name="Index")
        Y = te.compute((1,), lambda i: X[Index[i]])
        return [X, Index, Y]

    # This workload cannot use random input for the `Index` input
    task = auto_scheduler.SearchTask(
        func=foo,
        target="llvm",
        task_inputs={
            "Index": tvm.nd.array(np.array([5], dtype="int32")),
        },
    )

    for enable_cpu_cache_flush in [True, False]:
        minp = auto_scheduler.MeasureInput(task, task.compute_dag.init_state)
        local_builder = auto_scheduler.LocalBuilder()
        measure_ctx = auto_scheduler.LocalRPCMeasureContext(
            timeout=60, enable_cpu_cache_flush=enable_cpu_cache_flush
        )
        rpc_runner = measure_ctx.runner

        bress = local_builder.build([minp])
        assert bress[0].error_no == 0
        mress = rpc_runner.run([minp], bress)
        assert mress[0].error_no == 0
if __name__ == "__main__":
    # Run the tests serially when invoked directly.
    # NOTE(review): test_measure_local_builder_rpc_runner_spawn is not in this
    # list — presumably to avoid spawning subprocesses here; confirm intent.
    test_record_split_reorder_fuse_annotation()
    test_record_compute_at_root_inline_cache_read_write()
    test_record_follow_split_follow_fused_split()
    test_record_pragma_storage_align_rfactor()
    test_recover_measure_input()
    test_workload_dis_factor()
    test_measure_local_builder_runner()
    test_dag_measure_local_builder_runner()
    test_measure_local_builder_rpc_runner()
    test_measure_target_host()
    test_measure_special_inputs_map_by_name_local_runner()
    test_measure_special_inputs_map_by_name_rpc_runner()
|
figures_for_supplement.py | import time
import numpy as np
import plots
import itertools
from multiprocessing import Process, Lock
from pathlib import Path
import seaborn as sns
# from matplotlib import pyplot as plt
# Global plotting configuration for all supplement figures.
plots.folder_root = "../results/"
plots.LEGEND = True  # Set this to False to remove legends for plots
# All figures from this script land under this subdirectory.
subdir_prefix = Path('figs_supplement')

# Two-color palette for the g_radius ("chaos") comparisons: teal vs. red.
chaos_color_0 = np.array([0, 160, 160])/255
chaos_color_1 = np.array([233, 38, 41])/255
chaos_colors = [chaos_color_0, chaos_color_1]
chaos_palette = sns.color_palette(chaos_colors)

CLEAR_PREVIOUS_RUNS = False
# CLEAR_PREVIOUS_RUNS = True # Delete saved parameters from previous runs
if CLEAR_PREVIOUS_RUNS:
    import shutil
    shutil.rmtree('../data/output')
# % Shallow network tests
# Base hyperparameters shared by the shallow-network experiments; each run
# copies this dict and overrides individual entries.
base_params = dict(N=200,
                   num_epochs=40,
                   num_train_samples_per_epoch=800,
                   num_test_samples_per_epoch=100,
                   X_clusters=60,
                   X_dim=200,
                   num_classes=2,
                   n_lag=0,
                   # g_radius=1,
                   g_radius=20,
                   wout_scale=1.,
                   clust_sig=.02,
                   input_scale=1.0,
                   input_style='hypercube',
                   model_seed=1,
                   n_hold=1,
                   n_out=1,
                   use_biases=False,  # use_biases=True,
                   loss='mse',
                   optimizer='sgd',
                   momentum=0,
                   dt=.01,
                   learning_rate=1e-3,
                   # learning_patience=100,
                   scheduler='onecyclelr_4e4',
                   learning_patience=20,
                   scheduler_factor=10,
                   # scheduler='plateau',
                   # learning_patience=5,
                   # scheduler_factor=.5,
                   # scheduler='cyclic',
                   # learning_patience=20,
                   # scheduler_factor=.05,
                   batch_size=1,
                   train_output_weights=True,
                   freeze_input=False,
                   network='vanilla_rnn',
                   # network='feedforward',
                   Win='orthog',
                   l2_regularization=0,
                   patience_before_stopping=6000,
                   hid_nonlin='linear',
                   saves_per_epoch=1,
                   rerun=False,
                   # rerun=True,
                   )
# %% Probing batch sizes and freezing training weights
# Hyperparameter grids for the shallow sweeps; each dict's Cartesian product
# yields one run per combination (grid 2 uses the 'mse_scalar' loss).
params_shallow_1 = {
    'learning_rate': [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2],
    'optimizer': ['sgd', 'rmsprop'],
    'loss': ['mse'],
    'hid_nonlin': ['linear'],
    'l2_regularization': [0],
    # 'l2_regularization': [0, 10],
    'g_radius': [1]
}
params_shallow_2 = {
    'learning_rate': [1e-4, 5e-4, 1e-3, 5e-3],
    'optimizer': ['sgd', 'rmsprop'],
    'loss': ['mse_scalar'],
    'hid_nonlin': ['linear'],
    'l2_regularization': [0],
    # 'l2_regularization': [0, 10],
    'g_radius': [1]
}
# Key order here defines how entries of each parameter tuple are interpreted.
keys_shallow = list(params_shallow_1.keys())
keys_shallow_abbrev = ['lr', 'opt', 'loss', 'nonlin', 'l2', 'g']
ps_list_shallow = list(itertools.product(*params_shallow_1.values())) \
    + list(itertools.product(*params_shallow_2.values()))
def run_shallow_1(param_set, train_params, i0, multiprocess_lock=None):
    """Run one shallow (feedforward) experiment combo and generate its plots.

    param_set: one tuple from ps_list_shallow, interpreted via keys_shallow.
    train_params: base parameter dict (copied, not mutated).
    i0: run index (only used by the commented-out stagger sleep).
    multiprocess_lock: optional lock forwarded to the plotting routines.
    """
    # time.sleep(i0)
    plots.USE_ERRORBARS = False
    print(multiprocess_lock)
    train_params = train_params.copy()
    # NOTE(review): this loop variable shadows the i0 parameter above.
    for i0, key in enumerate(keys_shallow):
        train_params[key] = param_set[i0]
    train_params['network'] = 'feedforward'
    n_lag = 0
    train_params['n_lag'] = n_lag
    full_batch_size = train_params['num_train_samples_per_epoch']

    # Condition 1: online (batch_size=1) training.
    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    # tps_11['num_epochs'] = 200
    # tps_11['saves_per_epoch'] = 1/20
    tps_11['num_epochs'] = 100
    # Per-epoch save schedule: 2 saves/epoch for the first 10 epochs, then one
    # save every 10th epoch. NOTE(review): epoch index 10 gets 0 saves
    # (range(11, 100, 10) skips it) — confirm this is intentional.
    tps_11['saves_per_epoch'] = [0]*100
    for k in range(0, 10):
        tps_11['saves_per_epoch'][k] = 2
    for k in range(11, 100, 10):
        tps_11['saves_per_epoch'][k] = 1
    # tps_11['num_epochs'] = 100
    # tps_11['saves_per_epoch'] = 1/10
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
        tps_11['patience_before_stopping'] = tps_11['num_epochs']

    # Condition 2: full-batch training (longer, sparser saves).
    tps_12 = train_params.copy()
    tps_12['batch_size'] = full_batch_size
    tps_12['num_epochs'] = 1000
    tps_12['saves_per_epoch'] = 1/100
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_12['learning_patience'] = 100
        tps_12['scheduler_factor'] = 10
        tps_12['patience_before_stopping'] = tps_12['num_epochs']

    # Conditions 3/4: same two batch sizes but with frozen output weights.
    tps_21 = tps_11.copy()
    tps_21['train_output_weights'] = False
    tps_22 = tps_12.copy()
    tps_22['train_output_weights'] = False

    # Figure name encodes the swept parameter values (trailing '_' trimmed).
    figname = ''.join(key + '_' + str(val) + '_' for key, val in
                      zip(keys_shallow_abbrev, param_set))
    figname = figname[:-1]

    subdir_prefix2 = Path('{}/'.format(tps_11['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}/'.format(n_lag, tps_11['g_radius'],
                                                      train_params[
                                                          'l2_regularization']))
    # Shared positional arguments for every plotting call below.
    plot_ps = ([tps_11, tps_12], [tps_21, tps_22], [0, 1, 2], 'batch_size',
               'train_output_weights', figname)

    subdir = subdir_prefix2/'dim_over_training'/subdir_suffix
    plots.dim_through_training(*plot_ps, subdir=subdir_prefix/subdir,
                               multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'dim_over_layers'/subdir_suffix
    plots.dim_over_layers(*plot_ps, subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'orth_compression_through_training'/subdir_suffix
    plots.orth_compression_through_training(*plot_ps,
                                            subdir=subdir_prefix/subdir,
                                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'orth_compression_through_training_input_sep' \
                            ''/subdir_suffix
    plots.orth_compression_through_training_input_sep(*plot_ps,
                                                      subdir=subdir_prefix/subdir,
                                                      multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'acc_over_training'/subdir_suffix
    plots.acc_over_training(*plot_ps, subdir=subdir_prefix/subdir,
                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'loss_over_training'/subdir_suffix
    plots.loss_over_training(*plot_ps, subdir=subdir_prefix/subdir,
                             multiprocess_lock=multiprocess_lock)
# %% Probing noise of readout weights through training
# Same grids as the shallow sweeps, but with l2 pinned to 0.
params_readout_noise_1 = params_shallow_1.copy()
params_readout_noise_2 = params_shallow_2.copy()
params_readout_noise_1['l2_regularization'] = [0]
params_readout_noise_2['l2_regularization'] = [0]
keys_readout_noise = list(params_readout_noise_1.keys())
keys_readout_noise_abbrev = ['lr', 'opt', 'loss', 'nonlin', 'l2', 'g']
ps_list_readout_noise = list(itertools.product(*params_readout_noise_1.values())) \
    + list(itertools.product(*params_readout_noise_2.values()))
def run_readout_noise(param_set, train_params, i0, multiprocess_lock=None):
    """Plot readout-weight variance through training for one shallow combo.

    Same setup as run_shallow_1 but only the online (batch_size=1) condition
    and a single weight-variance plot.
    """
    # time.sleep(i0)
    plots.USE_ERRORBARS = False
    print(multiprocess_lock)
    train_params = train_params.copy()
    # NOTE(review): this loop variable shadows the i0 parameter above.
    for i0, key in enumerate(keys_readout_noise):
        train_params[key] = param_set[i0]
    train_params['network'] = 'feedforward'
    n_lag = 0
    train_params['n_lag'] = n_lag
    full_batch_size = train_params['num_train_samples_per_epoch']

    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    # tps_11['num_epochs'] = 200
    # tps_11['saves_per_epoch'] = 1/20
    tps_11['num_epochs'] = 100
    tps_11['saves_per_epoch'] = 20
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
        tps_11['patience_before_stopping'] = tps_11['num_epochs']

    figname = ''.join(key + '_' + str(val) + '_' for key, val in
                      zip(keys_readout_noise_abbrev, param_set))
    figname = figname[:-1]

    subdir_prefix2 = Path('{}/'.format(tps_11['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}/'.format(n_lag, tps_11['g_radius'],
                                                      train_params[
                                                          'l2_regularization']))
    subdir = subdir_prefix2/'weight_var_through_training'/subdir_suffix
    plots.weight_var_through_training([tps_11], None, [0, 1, 2],
                                      hue_key='batch_size',
                                      style_key='train_output_weights',
                                      figname=figname,
                                      subdir=subdir_prefix/subdir,
                                      multiprocess_lock=multiprocess_lock)
# % Recurrent network experiments
# %% batch sizes
# Grids for the recurrent sweeps; grid 2 covers the cce loss at smaller lrs.
params_recurrent_1 = {
    # 'learning_rate': [1e-4, 5e-4, 1e-3, 5e-3, 1e-2, 5e-2],
    'learning_rate': [1e-4, 5e-4, 1e-3, 5e-3],
    'optimizer': ['sgd', 'rmsprop'],
    'loss': ['mse', 'mse_scalar'],
    'hid_nonlin': ['tanh', 'linear'],
    'l2_regularization': [0, 50],
    'g_radius': [20]
}
params_recurrent_2 = {
    'learning_rate': [1e-6, 1e-5, 1e-4],
    'optimizer': ['sgd', 'rmsprop'],
    'loss': ['cce'],
    'hid_nonlin': ['tanh', 'linear'],
    'l2_regularization': [0],
    'g_radius': [20]
}
# Key order defines how entries of each parameter tuple are interpreted.
keys_deep = list(params_recurrent_1.keys())
keys_abbrev = ['lr', 'opt', 'loss', 'nonlin', 'l2', 'g']
ps_list_recurrent = list(itertools.product(*params_recurrent_1.values())) + \
    list(itertools.product(*params_recurrent_2.values()))
def run_recurrent(param_set, train_params, i0, multiprocess_lock=None):
    """Run one recurrent-network experiment combo and generate its plots.

    param_set: one tuple from ps_list_recurrent, interpreted via keys_deep.
    train_params: base parameter dict (copied, not mutated).
    i0: run index (unused; kept for interface compatibility with the pool).
    multiprocess_lock: optional lock forwarded to the plotting routines.

    Fixes: removed a duplicated ``patience_before_stopping`` assignment,
    removed the unused ``lr``/``optimizer`` locals, and renamed the loop
    variable that shadowed the ``i0`` parameter.
    """
    plots.USE_ERRORBARS = False
    print(multiprocess_lock)
    train_params = train_params.copy()
    for idx, key in enumerate(keys_deep):
        train_params[key] = param_set[idx]
    train_params['n_lag'] = 10
    n_lag = train_params['n_lag']

    # Figure name encodes the swept parameter values (trailing '_' trimmed).
    figname = ''.join(
        key + '_' + str(val) + '_' for key, val in zip(keys_abbrev, param_set))
    figname = figname[:-1]
    full_batch_size = train_params['num_train_samples_per_epoch']

    # Condition 1: online (batch_size=1) training.
    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    tps_11['num_epochs'] = 200
    tps_11['saves_per_epoch'] = 1/20
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
        tps_11['patience_before_stopping'] = tps_11['num_epochs']

    # Condition 2: full-batch training (longer, sparser saves).
    tps_12 = train_params.copy()
    tps_12['batch_size'] = full_batch_size
    tps_12['num_epochs'] = 1000
    tps_12['saves_per_epoch'] = 1/100
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_12['learning_patience'] = 20
        tps_12['scheduler_factor'] = 10
        tps_12['patience_before_stopping'] = tps_12['num_epochs']

    subdir_prefix2 = Path('{}'.format(train_params['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}'.format(n_lag, tps_11['g_radius'],
                                                     train_params[
                                                         'l2_regularization']))
    # Shared positional arguments for every plotting call below.
    plot_ps = ([tps_11, tps_12], None, [0, 1], 'batch_size', None, figname)

    subdir = subdir_prefix2/'dim_over_training'/subdir_suffix
    plots.dim_through_training(*plot_ps, subdir=subdir_prefix/subdir,
                               multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'dim_over_layers'/subdir_suffix
    plots.dim_over_layers(*plot_ps, subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'orth_compression_through_training'/subdir_suffix
    plots.orth_compression_through_training(*plot_ps,
                                            subdir=subdir_prefix/subdir,
                                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'orth_compression_through_training_input_sep' \
                            ''/subdir_suffix
    plots.orth_compression_through_training_input_sep(*plot_ps,
                                                      subdir=subdir_prefix/subdir,
                                                      multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'acc_over_training'/subdir_suffix
    plots.acc_over_training(*plot_ps, subdir=subdir_prefix/subdir,
                            multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'loss_over_training'/subdir_suffix
    plots.loss_over_training(*plot_ps, subdir=subdir_prefix/subdir,
                             multiprocess_lock=multiprocess_lock)
# Recurrent readout-noise grids: same as the recurrent sweeps with l2 = 0.
params_readout_noise_deep_1 = params_recurrent_1.copy()
params_readout_noise_deep_1['l2_regularization'] = [0]
params_readout_noise_deep_2 = params_recurrent_2.copy()
params_readout_noise_deep_2['l2_regularization'] = [0]
keys_readout_noise_deep = list(params_recurrent_1.keys())
keys_readout_noise_deep_abbrev = ['lr', 'opt', 'loss', 'nonlin', 'l2', 'g']
ps_list_readout_noise_recurrent = list(
    itertools.product(*params_readout_noise_deep_1.values())) + \
    list(itertools.product(
        *params_readout_noise_deep_2.values()))
# pvals_readout_noise_deep = ps_vals_deep
def run_readout_noise_recurrent(param_set, train_params, i0, multiprocess_lock=None):
    """Plot readout-weight variance through training for one recurrent combo.

    Produces two figures: raw weight variance and (weight_type='product')
    the variance of the weight product.
    """
    # time.sleep(i0)
    plots.USE_ERRORBARS = False
    print(multiprocess_lock)
    train_params = train_params.copy()
    # NOTE(review): this loop variable shadows the i0 parameter above.
    for i0, key in enumerate(keys_readout_noise_deep):
        train_params[key] = param_set[i0]
    train_params['n_lag'] = 10
    n_lag = train_params['n_lag']

    tps_11 = train_params.copy()
    tps_11['batch_size'] = 1
    # tps_11['num_epochs'] = 200
    # tps_11['saves_per_epoch'] = 1/20
    tps_11['num_epochs'] = 100
    tps_11['saves_per_epoch'] = 20
    if train_params['scheduler'] in ('onecyclelr_4e4', 'onecyclelr'):
        tps_11['learning_patience'] = 20
        tps_11['scheduler_factor'] = 10
        tps_11['patience_before_stopping'] = tps_11['num_epochs']

    figname = ''.join(key + '_' + str(val) + '_' for key, val in
                      zip(keys_readout_noise_deep_abbrev, param_set))
    figname = figname[:-1]

    subdir_prefix2 = Path('{}/'.format(tps_11['network']))
    subdir_suffix = Path('nlag_{}_g_{}_l2_{}/'.format(n_lag, tps_11['g_radius'],
                                                      train_params[
                                                          'l2_regularization']))
    subdir = subdir_prefix2/'weight_var_through_training'/subdir_suffix
    plots.weight_var_through_training([tps_11], None, [0, 1],
                                      hue_key='batch_size',
                                      style_key='train_output_weights',
                                      figname=figname,
                                      subdir=subdir_prefix/subdir,
                                      multiprocess_lock=multiprocess_lock)
    subdir = subdir_prefix2/'weight_product_var_through_training'/subdir_suffix
    plots.weight_var_through_training([tps_11], None, [0, 1],
                                      hue_key='batch_size',
                                      style_key='train_output_weights',
                                      figname=figname,
                                      subdir=subdir_prefix/subdir,
                                      weight_type='product',
                                      multiprocess_lock=multiprocess_lock)
# %% Low-d input experiments
# Base configuration for the low-dimensional-input runs; run_lowd copies this
# (it does NOT use base_params) and overrides entries from params_lowd.
low_d_params = dict(N=200, num_epochs=80, num_train_samples_per_epoch=800,
                    X_clusters=60, X_dim=2, num_classes=2, n_lag=10,
                    g_radius=20, clust_sig=.02, input_scale=1, n_hold=1,
                    n_out=1, loss='cce', optimizer='rmsprop', dt=.01,
                    momentum=0, learning_rate=1e-3, batch_size=10,
                    freeze_input=False, network='vanilla_rnn', Win='orthog',
                    patience_before_stopping=6000, hid_nonlin='tanh',
                    model_seed=0, rerun=False)
params_lowd = {
    'X_clusters': [40, 60, 120],
    'X_dim': [2, 4, 10],
    'learning_rate': [1e-4, 1e-3],
    'n_lag': [6, 10, 14],
    'N': [200, 300],
    'loss': ['cce', 'mse'],
    # 'l2_regularization': [0, 10, 50],
}
# params_lowd = {
#     'X_clusters': [60],
#     'X_dim': [2, 4],
#     'learning_rate': [1e-4, 1e-3, 5e-3],
#     'n_lag': [14],
#     'N': [200],
#     'loss': ['mse']
# }
keys_lowd = list(params_lowd.keys())
keys_abbrev_lowd = ['clust', 'xdim', 'lr', 'nlag', 'N', 'loss']
ps_list_lowd = list(itertools.product(*params_lowd.values()))
def run_lowd(param_set, train_params, i0, multiprocess_lock=None):
    """Generate dim/clustering plots for one low-dimensional-input setting.

    param_set -- one tuple from ps_list_lowd, in keys_lowd order.
    train_params -- unused; kept for call-site compatibility (base
        parameters are always taken from low_d_params).
    i0 -- index of this parameter set (kept for signature compatibility;
        shadowed by the loop below, as in the original).
    multiprocess_lock -- optional lock forwarded to the plot helpers.
    """
    plots.USE_ERRORBARS = True
    # time.sleep(i0)
    tps = low_d_params.copy()
    # tps['table_path'] = 'output_lowd/output_table.csv'
    subdir_prefix = Path('Win_orth_lowd/')
    for i0, key in enumerate(keys_lowd):
        tps[key] = param_set[i0]
    # Trained vs. untrained (num_epochs=0) runs at the edge of chaos ...
    tps_11 = tps.copy()
    tps_11['g_radius'] = 20
    tps_21 = tps_11.copy()
    tps_21['num_epochs'] = 0
    # ... and in the strongly chaotic regime.
    tps_12 = tps.copy()
    tps_12['g_radius'] = 250
    tps_22 = tps_12.copy()
    tps_22['num_epochs'] = 0
    figname = ''.join(key + '_' + str(val) + '_' for key, val in
                      zip(keys_abbrev_lowd, param_set))
    figname = figname[:-1]  # drop trailing underscore
    subdir = subdir_prefix/'dim_over_layers'
    # Fix: the original issued this exact call twice in a row (same args,
    # same figname, same subdir); the redundant duplicate is removed.
    plots.dim_over_layers([tps_11, tps_12], [tps_21, tps_22], seeds=[0, 1, 2],
                          hue_key='g_radius', style_key='num_epochs',
                          figname=figname + '_g_{}'.format(tps_11['g_radius']),
                          subdir=subdir, multiprocess_lock=multiprocess_lock,
                          style_order=[80, 0], palette=chaos_palette)
    subdir = subdir_prefix/'clust_holdout_over_layers'
    plots.clust_holdout_over_layers([tps_11, tps_12], [tps_21, tps_22],
                                    seeds=[0, 1, 2],
                                    hue_key='g_radius',
                                    style_key='num_epochs',
                                    figname=figname + '_g_{}'.format(
                                        tps_11['g_radius']),
                                    subdir=subdir,
                                    multiprocess_lock=multiprocess_lock,
                                    style_order=[80, 0],
                                    palette=chaos_palette)
    # subdir = subdir_prefix/'ashok_compression_metric'
    # plots.ashok_compression_metric([tps_12], [tps_21, tps_22], seeds=[0, 1, 2],
    #                                style_key='num_epochs',
    #                                figname=figname + '_g_{}'.format(
    #                                    tps_12['g_radius']), subdir=subdir,
    #                                multiprocess_lock=multiprocess_lock,
    #                                style_order=[80, 0])
    plots.USE_ERRORBARS = False
# %% Low-d chaos experiments
# Base hyperparameters for the low-d chaos sweep; overridden per run.
low_d_chaos_params = dict(N=200, num_epochs=80, num_train_samples_per_epoch=800,
                          X_clusters=60, X_dim=2, num_classes=2, n_lag=10,
                          g_radius=20, clust_sig=.02, input_scale=1, n_hold=1,
                          n_out=1, loss='cce', optimizer='rmsprop', dt=.01,
                          momentum=0, learning_rate=1e-3, batch_size=10,
                          freeze_input=False, network='vanilla_rnn',
                          Win='orthog',
                          patience_before_stopping=6000, hid_nonlin='tanh',
                          model_seed=0, rerun=False)
# Sweep grid (cartesian product) for the chaos experiments.
params_lowd_chaos = {
    'X_clusters': [60],
    'X_dim': [2],
    'learning_rate': [1e-3, 1e-4],
    'n_lag': [10]
    }
keys_lowd_chaos = list(params_lowd_chaos.keys())
# Abbreviations used in figure filenames (same order as keys_lowd_chaos).
keys_abbrev_lowd_chaos = ['clust', 'xdim', 'lr', 'nlag']
ps_list_lowd_chaos = list(itertools.product(*params_lowd_chaos.values()))
def run_lowd_chaos(param_set, train_params, i0, multiprocess_lock=None):
    """Plot dimensionality over layers across a sweep of chaos strengths.

    One trained and one untrained configuration is built for each g_radius
    in 20, 60, ..., 260, and both families are plotted separately.
    """
    plots.USE_ERRORBARS = True
    # time.sleep(i0)
    base = train_params.copy()
    subdir_prefix = Path('Win_orth_lowd_chaos/')
    for idx, key in enumerate(keys_lowd_chaos):
        base[key] = param_set[idx]
    untrained = []
    trained = []
    for g in range(20, 261, 40):
        cfg = base.copy()
        cfg['g_radius'] = g
        trained.append(cfg)
        cfg = cfg.copy()
        cfg['num_epochs'] = 0
        untrained.append(cfg)
    figname = '_'.join(abbrev + '_' + str(val) for abbrev, val in
                       zip(keys_abbrev_lowd_chaos, param_set))
    seeds = list(range(5))
    subdir = subdir_prefix/'dim_over_layers'
    plots.dim_over_layers(untrained, None, seeds=seeds,
                          hue_key='g_radius', style_key='num_epochs',
                          figname=figname + '_before',
                          subdir=subdir, use_error_bars=True,
                          multiprocess_lock=multiprocess_lock,
                          palette='viridis')
    plots.dim_over_layers(trained, None, seeds=seeds,
                          hue_key='g_radius', style_key='num_epochs',
                          figname=figname,
                          use_error_bars=True,
                          subdir=subdir, multiprocess_lock=multiprocess_lock,
                          palette='viridis')
    plots.USE_ERRORBARS = False
# Base hyperparameters for the high-dimensional-input (X_dim=200)
# experiments; individual runs override entries below.
high_d_input_edge_of_chaos_params = dict(
    N=200,
    num_epochs=40,
    num_train_samples_per_epoch=800,
    X_clusters=60,
    X_dim=200,
    num_classes=2,
    n_lag=10,
    g_radius=20,
    clust_sig=.02,
    input_scale=1,
    n_hold=1,
    n_out=1,
    loss='cce',
    optimizer='rmsprop',
    dt=.01,
    momentum=0,
    learning_rate=1e-3,
    batch_size=10,
    freeze_input=False,
    train_output_weights=True,
    network='vanilla_rnn',
    Win='orthog',
    patience_before_stopping=6000,
    hid_nonlin='tanh',
    model_seed=0,
    rerun=False)
# %% high-d input experiments with different hyperparameters
# Sweep grid for run_rnn_high_d_input (cartesian product of values).
params_rnn_high_d = {
    'loss': ['cce', 'mse', 'mse_scalar'],
    'optimizer': ['sgd', 'rmsprop'],
    'learning_rate': [1e-3, 1e-4],
    'n_lag': [6, 10, 14],
    'N': [200, 300]
    }
ps_list_rnn_high_d_vals = list(itertools.product(*params_rnn_high_d.values()))
params_rnn_high_d_keys = params_rnn_high_d.keys()
# Abbreviations used in figure filenames (same order as the keys above).
params_rnn_high_d_keys_abbrev = ['loss', 'opt', 'lr', 'n_lag', 'N']
def run_rnn_high_d_input(param_set, multiprocess_lock=None):
    """Dimensionality-over-layers plots for high-dimensional-input RNNs.

    Compares the edge-of-chaos (g_radius=20) and strongly chaotic
    (g_radius=250) regimes for one hyperparameter combination.
    """
    plots.USE_ERRORBARS = False
    net_dir = Path('vanilla_rnn')
    cfg = high_d_input_edge_of_chaos_params.copy()
    for idx, key in enumerate(params_rnn_high_d_keys):
        cfg[key] = param_set[idx]
    seeds = list(range(5))
    edge = cfg.copy()
    edge['g_radius'] = 20
    chaotic = cfg.copy()
    chaotic['g_radius'] = 250
    figname = '_'.join(abbrev + '_' + str(val) for abbrev, val in
                       zip(params_rnn_high_d_keys_abbrev, param_set))
    subdir = net_dir/'dim_high_d_experiments'
    plots.dim_over_layers([edge, chaotic], None, seeds, 'g_radius', None,
                          figname,
                          subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)
# %% Training RNN on high-d data and freezing output weights
# high_d_input_strongly_chaotic_params['g_radius'] = 250
# Sweep grid for the frozen-output-weights comparison.
params_rnn_freeze = {
    'loss': ['cce', 'mse', 'mse_scalar'],
    'optimizer': ['sgd', 'rmsprop'],
    'learning_rate': [1e-3, 1e-4]
    }
ps_list_rnn_freeze_vals = list(itertools.product(*params_rnn_freeze.values()))
params_rnn_freeze_keys = params_rnn_freeze.keys()
# Abbreviations used in figure filenames (same order as the keys above).
params_rnn_freeze_keys_abbrev = ['loss', 'opt', 'lr']
def run_rnn_freeze_output(param_set, multiprocess_lock=None):
    """Plot dimensionality through training with output weights trained
    versus frozen, for one hyperparameter combination."""
    plots.USE_ERRORBARS = True
    net_dir = Path('vanilla_rnn')
    cfg = high_d_input_edge_of_chaos_params.copy()
    for idx, key in enumerate(params_rnn_freeze_keys):
        cfg[key] = param_set[idx]
    seeds = list(range(5))
    trained_out = cfg.copy()
    trained_out['train_output_weights'] = True
    frozen_out = cfg.copy()
    frozen_out['train_output_weights'] = False
    figname = '_'.join(abbrev + '_' + str(val) for abbrev, val in
                       zip(params_rnn_freeze_keys_abbrev, param_set))
    subdir = net_dir/'dim_freeze_output_weights'
    plots.dim_through_training([trained_out, frozen_out], None, seeds,
                               'train_output_weights', None, figname,
                               subdir=subdir_prefix/subdir,
                               multiprocess_lock=multiprocess_lock)
# %% Training RNN with noisy units
# Two sub-grids: a dropout sweep with no injected noise (temp1) and an
# injected-noise sweep with no dropout (temp2); their concatenation forms
# the full run list.
temp1 = {
    'loss': ['cce', 'mse', 'mse_scalar'],
    'optimizer': ['sgd', 'rmsprop'],
    'learning_rate': [1e-3, 1e-4],
    'dropout_p': [0, 0.05, 0.1, 0.2, 0.5],
    'unit_injected_noise': [0]
    }
temp2 = {
    'loss': ['cce', 'mse', 'mse_scalar'],
    'optimizer': ['sgd', 'rmsprop'],
    'learning_rate': [1e-3, 1e-4],
    'dropout_p': [0],
    'unit_injected_noise': [0.05, 0.1, 0.2, 0.5]
    }
# params_rnn_noisy_units =
ps_list_rnn_noisy_units_vals = list(itertools.product(*temp1.values())) \
                               + list(itertools.product(*temp2.values()))
params_rnn_noisy_units_keys = temp1.keys()  # temp1 and temp2 share key order
# Abbreviations used in figure filenames (same order as the keys above).
params_rnn_noisy_units_keys_abbrev = ['loss', 'opt', 'lr', 'dropout', 'noise']
def run_rnn_noisy_units(param_set, multiprocess_lock=None):
    """Dim-over-layers plots for RNNs trained with dropout or injected
    unit noise, for high-d input, low-d input, and low-d input fed into
    exactly two neurons."""
    plots.USE_ERRORBARS = False
    net_dir = Path('vanilla_rnn')
    seeds = list(range(5))
    subdir = net_dir/'dim_noisy_units'
    figname = '_'.join(abbrev + '_' + str(val) for abbrev, val in
                       zip(params_rnn_noisy_units_keys_abbrev, param_set))

    def chaos_pair(base, **extra):
        """Return (edge-of-chaos, strongly-chaotic) copies of *base*."""
        edge = base.copy()
        edge.update(extra)
        edge['g_radius'] = 20
        chaotic = base.copy()
        chaotic.update(extra)
        chaotic['g_radius'] = 250
        return edge, chaotic

    high_d = high_d_input_edge_of_chaos_params.copy()
    for idx, key in enumerate(params_rnn_noisy_units_keys):
        high_d[key] = param_set[idx]
    edge, chaotic = chaos_pair(high_d)
    plots.dim_over_layers([edge, chaotic], None, seeds,
                          'g_radius', None, figname + '_X_dim_200',
                          subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)
    low_d = low_d_params.copy()
    for idx, key in enumerate(params_rnn_noisy_units_keys):
        low_d[key] = param_set[idx]
    edge, chaotic = chaos_pair(low_d)
    plots.dim_over_layers([edge, chaotic], None, seeds,
                          'g_radius', None, figname + '_X_dim_2',
                          subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)
    edge, chaotic = chaos_pair(low_d, Win='diagonal_first_two')
    plots.dim_over_layers([edge, chaotic], None,
                          seeds, 'g_radius', None,
                          figname + '_X_dim_2_ident',
                          subdir=subdir_prefix/subdir,
                          multiprocess_lock=multiprocess_lock,
                          palette=chaos_palette)
# %% Run things here
if __name__ == '__main__':
    # Single lock shared by all plotting processes to serialize access to
    # shared output (figures / tables).
    lock = Lock()
    # Serial run
    # [run_shallow_1(p, base_params, k) for k, p in enumerate(
    #     ps_list_shallow)]  #
    # [run_readout_noise(p, base_params, k) for k, p in
    #     enumerate(pvals_readout_noise)]
    # [run_deep_1(p, base_params, k) for k, p in enumerate(ps_vals_deep)]
    # [run_readout_noise_deep(p, base_params, k) for k, p in
    #     enumerate(pvals_readout_noise_deep)]
    # [run_lowd(p, low_d_params, k) for k, p in enumerate(params_lowd_loop)]
    # [run_lowd_chaos(p, low_d_chaos_params, k) for k, p in
    #     enumerate(params_lowd_chaos_loop)]
    # [run_rnn_high_d_input(p) for k, p in enumerate(params_rnn_high_d_vals)]
    # [run_rnn_freeze_output(p) for p in params_rnn_freeze_vals]
    # [run_rnn_noisy_units_output(p) for p in params_rnn_noisy_units_vals]
    print("Setting up multiprocess.")
    # One Process per parameter set; everything is started at once with no
    # pool / concurrency cap, so the full sweep runs simultaneously.
    processes = []
    processes += [Process(  # Plots for Figure S5
        target=run_rnn_freeze_output,
        args=(p, lock)
        ) for p in ps_list_rnn_freeze_vals]
    processes += [Process(  # Plots for Figures S6 and S7
        target=run_shallow_1, args=(p, base_params, i0, lock)) for i0, p in
        enumerate(ps_list_shallow)]
    processes += [Process(  # Plots for Figures S6 and S7
        target=run_readout_noise, args=(p, base_params, i0, lock)
        ) for i0, p in enumerate(ps_list_readout_noise)]
    processes += [Process(  # Plots for Figures S8-S11
        target=run_recurrent, args=(p, base_params, i0, lock)) for i0, p in
        enumerate(ps_list_recurrent)]
    processes += [Process(  # Plots for Figures S8-S11
        target=run_readout_noise_recurrent, args=(p, base_params, i0, lock)
        ) for i0, p in enumerate(ps_list_readout_noise_recurrent)]
    processes += [Process(  # Plots for Figure S13
        target=run_rnn_high_d_input,
        args=(p, lock)
        ) for p in ps_list_rnn_high_d_vals]
    processes += [Process(  # Plots for Figures S14--S16, S17, and S22
        target=run_lowd, args=(p, low_d_params, i0, lock)
        ) for i0, p in enumerate(ps_list_lowd)]
    processes += [Process(  # Plots for Figure S18
        target=run_lowd_chaos, args=(p, low_d_chaos_params, i0, lock)
        ) for i0, p in enumerate(ps_list_lowd_chaos)]
    processes += [Process(  # Plots for Figure S18--S21
        target=run_rnn_noisy_units,
        args=(p, lock)
        ) for p in ps_list_rnn_noisy_units_vals]
    print("Starting", len(processes), "processes")
    [process.start() for process in processes]
    print("Joining processes.")
    [process.join() for process in processes]
|
reloader_helpers.py | import os
import sys
import signal
import subprocess
from time import sleep
from multiprocessing import Process
def _iter_module_files():
    """Yield the source file of every currently loaded Python module.

    Walks sys.modules, resolving each module's ``__file__`` up the
    directory tree until an existing file is found, and maps compiled
    ``.pyc``/``.pyo`` paths back to their ``.py`` source.
    """
    # Snapshot the values: sys.modules can mutate during iteration.
    for mod in list(sys.modules.values()):
        if mod is None:
            continue
        path = getattr(mod, '__file__', None)
        if not path:
            continue
        previous = None
        while not os.path.isfile(path):
            previous = path
            path = os.path.dirname(path)
            if path == previous:
                break  # reached the filesystem root without finding a file
        else:
            if path.endswith(('.pyc', '.pyo')):
                path = path[:-1]
            yield path
def _get_args_for_reloading():
    """Return the command line that launched this process: the interpreter
    executable followed by the original argv."""
    return [sys.executable] + sys.argv
def restart_with_reloader():
    """Re-launch this program in a child process.

    Spawns a multiprocessing.Process that runs the same interpreter and
    arguments as the current process, with SANIC_SERVER_RUNNING set so
    the child knows it is the worker.

    :return: the started Process object.
    """
    args = _get_args_for_reloading()
    new_environ = os.environ.copy()
    new_environ['SANIC_SERVER_RUNNING'] = 'true'
    # Fix: pass the argument list directly instead of joining it into a
    # shell string. ' '.join(args) + shell=True broke on interpreter or
    # script paths containing spaces and let the shell reinterpret the
    # arguments.
    worker_process = Process(
        target=subprocess.call, args=(args,),
        kwargs=dict(env=new_environ))
    worker_process.start()
    return worker_process
def kill_process_children_unix(pid):
    """Find and kill child processes of a process (maximum two level).

    Reads the Linux-specific /proc/<pid>/task/<pid>/children lists and
    sends SIGTERM to the *grandchildren* of ``pid``.
    NOTE(review): the direct children themselves are never signalled —
    confirm this is intended.

    :param pid: PID of parent process (process ID)
    :return: Nothing
    """
    root_process_path = "/proc/{pid}/task/{pid}/children".format(pid=pid)
    if not os.path.isfile(root_process_path):
        return  # no /proc entry: process gone, or not a Linux system
    with open(root_process_path) as children_list_file:
        children_list_pid = children_list_file.read().split()
    for child_pid in children_list_pid:
        children_proc_path = "/proc/%s/task/%s/children" % \
                             (child_pid, child_pid)
        if not os.path.isfile(children_proc_path):
            continue  # child already exited
        with open(children_proc_path) as children_list_file_2:
            children_list_pid_2 = children_list_file_2.read().split()
        for _pid in children_list_pid_2:
            os.kill(int(_pid), signal.SIGTERM)
def kill_process_children_osx(pid):
    """Find and kill child processes of a process (macOS).

    :param pid: PID of parent process (process ID)
    :return: Nothing
    """
    # pkill -P matches processes whose parent PID equals ``pid``.
    subprocess.run(["pkill", "-P", "{}".format(pid)])
def kill_process_children(pid):
    """Find and kill child processes of a process.

    Dispatches to the platform-specific helper.

    :param pid: PID of parent process (process ID)
    :return: Nothing
    """
    if sys.platform == 'darwin':
        kill_process_children_osx(pid)
    elif os.name == 'posix':
        # Bug fix: sys.platform is 'linux' / 'freebsd...' on POSIX
        # systems — never the literal 'posix' — so the original test
        # (sys.platform == 'posix') could never match and child
        # processes were never killed on Linux. os.name is 'posix' on
        # all such platforms.
        kill_process_children_unix(pid)
    else:
        pass  # should signal error here (e.g. Windows is unsupported)
def kill_program_completly(proc):
    """Kill the worker process and its children, then exit immediately.

    :param proc: worker Process object (its ``.pid`` is used)
    :return: Nothing -- the interpreter exits via os._exit(0)
    """
    kill_process_children(proc.pid)
    proc.terminate()
    # os._exit skips atexit handlers and finally blocks; used so signal
    # handlers can tear the whole program down without re-entering
    # Python shutdown logic.
    os._exit(0)
def watchdog(sleep_interval):
    """Watch project files, restart worker process if a change happened.

    Polls the mtime of every loaded module's file each *sleep_interval*
    seconds; when one changes, the worker's children are killed and the
    worker is restarted. Runs forever; SIGTERM/SIGINT tear the whole
    program (watchdog + worker) down.

    :param sleep_interval: interval in second.
    :return: Nothing
    """
    mtimes = {}  # filename -> last recorded st_mtime
    worker_process = restart_with_reloader()
    # Make signals aimed at the watchdog also kill the worker tree.
    signal.signal(
        signal.SIGTERM, lambda *args: kill_program_completly(worker_process))
    signal.signal(
        signal.SIGINT, lambda *args: kill_program_completly(worker_process))
    while True:
        for filename in _iter_module_files():
            try:
                mtime = os.stat(filename).st_mtime
            except OSError:
                continue  # file vanished between listing and stat
            old_time = mtimes.get(filename)
            if old_time is None:
                # First sighting: record the baseline, don't restart.
                mtimes[filename] = mtime
                continue
            elif mtime > old_time:
                # File changed: restart the worker and re-scan from the top.
                kill_process_children(worker_process.pid)
                worker_process = restart_with_reloader()
                mtimes[filename] = mtime
                break
        sleep(sleep_interval)
|
way.py | import time
import math
from threading import Thread
msgHeader = "[AGENT]: "
# Constants file
import os
# Window size used for the world's 'dimensions' entry.
DISPLAY_WIDTH = 1024
DISPLAY_HEIGHT = 728
# Resource directories resolved relative to this file's location.
MAPS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "maps")
STRATEGIES_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "strategies")
MEDIA_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "media")
ZENWHEELS_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "zenwheels")
CALIBRATION_IMG_PATH = os.path.join(MEDIA_DIR, 'checkerboard.png')
# World file
# NOTE(review): this rebinds msgHeader from "[AGENT]: " to "[WORLD]: ",
# so every later print (including Agent's) uses the "[WORLD]: " prefix —
# the file looks like several modules concatenated; confirm intended.
msgHeader = "[WORLD]: "
class World():
    """Shared state describing agents, vehicles, the map and waypoints."""

    def __init__(self, agents, vehicles, map, waypoints):
        """Assemble the world_data dictionary from the supplied parts."""
        self.world_data = {
            'agents': agents,
            'vehicles': vehicles,
            'dimensions': (DISPLAY_WIDTH, DISPLAY_HEIGHT),
            'map': map,
            'waypoints': waypoints,
        }
        print(msgHeader + "Initialisation complete.")

    # Update the world state.
    def update(self, car_locations):
        """Refresh each known vehicle's pose from the latest observations.

        A vehicle with no matching observation (by owner ID) has its
        position and orientation cleared to None.
        """
        for veh in self.world_data['vehicles']:
            observation = next(
                (car for car in car_locations
                 if car['ID'] == veh.owner.ID),
                None)
            if observation is None:
                veh.position = None
                veh.orientation = None
            else:
                veh.position = observation['position']
                veh.orientation = observation['orientation']
        print(self.world_data)

    def get_world_data(self):
        """Return a shallow copy of the world state dictionary."""
        return dict(self.world_data)
###Agent file
class Agent():
    """An autonomous agent driving one vehicle, optionally via a strategy
    module loaded from a file."""

    def __init__(self, ID, vehicleType="car", strategyFile=None):
        """Create the agent's vehicle and optionally load a strategy.

        ID -- any value; stored as a string.
        vehicleType -- 'car', 'truck', 'motorcycle' or 'bicycle'
            (case-insensitive); unknown types fall back to a car.
        strategyFile -- path to a Python file exposing
            make_decision(agent); a load failure is fatal.
        """
        self.ID = str(ID)
        vt = vehicleType.lower()  # hoisted: compared four times below
        if vt == "car":
            self.vehicle = vehicle.Car(self)
        elif vt == "truck":
            self.vehicle = vehicle.Truck(self)
        elif vt == "motorcycle":
            self.vehicle = vehicle.Motorcycle(self)
        elif vt == "bicycle":
            self.vehicle = vehicle.Bicycle(self)
        else:
            print(msgHeader + "Could not initialise Agent " + self.ID + " with vehicle type '" + vehicleType + "'.")
            self.vehicle = vehicle.Car(self)
        self.worldKnowledge = {}
        self.strategy = None
        if strategyFile is not None:
            try:
                self.strategy = import_file("strategy", strategyFile)
                print(msgHeader + "Successfully loaded the strategy file for Agent " + self.ID + ".")
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt /
                # SystemExit are not swallowed before the deliberate
                # exit() below.
                print(msgHeader + "Could not load the strategy file for Agent " + self.ID + ". (Fatal)")
                exit()
        self.stopped = False

    def start(self):
        """Run the decision loop on a daemon thread; returns self."""
        t_process = Thread(target=self.update)
        t_process.daemon = True
        t_process.start()
        return self

    def update(self):
        """Decision loop: ask the strategy for a decision 5x per second
        until stop() is called (or there is no strategy)."""
        while True:
            if self.stopped or not self.strategy:
                return
            self.strategy.make_decision(self)
            time.sleep(0.2)

    def stop(self):
        """Stop the vehicle and end the decision loop."""
        self.vehicle.stop()
        self.stopped = True

    def update_world_knowledge(self, worldData):
        """Copy into worldKnowledge the values for keys it already has."""
        for key in self.worldKnowledge:
            if key in worldData:
                self.worldKnowledge[key] = worldData[key]

    def aim_speed(self, speed):
        """Move the current speed toward *speed*, capped per call by the
        vehicle's max acceleration / deceleration."""
        cspeed = self.vehicle.current_speed
        if cspeed is None:
            cspeed = 0
        if speed > cspeed:
            diff = min(speed - cspeed, self.vehicle.max_acceleration)
            self.vehicle.set_speed(cspeed + diff)
        else:
            diff = min(cspeed - speed, self.vehicle.max_deceleration)
            self.vehicle.set_speed(cspeed - diff)

    def aim_angle(self, angle):
        """Steer toward *angle* (degrees), turning the short way around;
        the steering command is a third of the remaining difference."""
        cangle = self.vehicle.orientation
        if cangle is None:
            cangle = 0
        diff = int(math.fabs(angle - cangle))
        if diff > 180:
            # Shorter to go the other way around the circle.
            diff = 360 - diff
            da = -diff if cangle < angle else diff
        else:
            da = diff if cangle < angle else -diff
        self.vehicle.set_angle(da // 3)

    def get_vector_between_points(self, x1, y1, x2, y2):
        """Return (distance, bearing) from (x1, y1) to (x2, y2).

        Bearing is in degrees clockwise from north, in screen
        coordinates (y grows downward). Returns (None, None) when the
        start point is unknown.
        """
        if x1 is None or y1 is None:
            return (None, None)
        dx = x2 - x1
        dy = y2 - y1
        dist = int(math.sqrt(dx * dx + dy * dy))
        theta = 0
        if dx != 0:
            theta = math.atan(dy / dx) * (180 / math.pi)
        if dx == 0:
            theta = 0 if dy <= 0 else 180
        elif dy == 0:
            theta = 270 if dx < 0 else 90
        elif dx > 0:
            # Right half-plane: shift the atan result into [0, 180).
            theta = theta + 90
        else:
            # Left half-plane: shift the atan result into [180, 360).
            theta = theta + 270
        return (dist, theta)

    # Return Distance and Angle to current waypoint. Angle must be
    # degrees clockwise from north.
    def get_vector_to_waypoint(self):
        """Return (distance, bearing) to the current waypoint, or
        (None, None) if position or waypoint data is missing."""
        if self.vehicle.position[0] is not None and self.vehicle.position[1] is not None:
            wpi = self.get_waypoint_index()
            if wpi is not None:
                if self.worldKnowledge['waypoints'] != []:
                    x1 = self.vehicle.position[0]
                    y1 = self.vehicle.position[1]
                    x2 = self.worldKnowledge['waypoints'][wpi][0]
                    y2 = self.worldKnowledge['waypoints'][wpi][1]
                    return self.get_vector_between_points(x1, y1, x2, y2)
        return (None, None)

    def get_waypoint_index(self):
        """Return the index of the waypoint currently being targeted."""
        return self.worldKnowledge['waypoint_index']

    def set_waypoint_index(self, wp):
        """Set the target waypoint index, wrapping around both ends."""
        mmax = len(self.worldKnowledge['waypoints']) - 1
        if wp > mmax:
            wp = 0
        if wp < 0:
            wp = mmax
        self.worldKnowledge['waypoint_index'] = wp

    # Timestamp taken when the class body executes (module import time).
    # Bug fix: this attribute was named `start`, which clobbered the
    # start() method defined above (making agent.start() a float, not
    # callable) and was unresolvable as a bare name inside
    # waypoint_index_repeat (class attributes are not in method scope).
    _timer_start = time.time()

    # When you get to the first way point again, report the lap time.
    def waypoint_index_repeat(self, wp):
        """Print seconds elapsed since the class was loaded."""
        end = time.time()
        print(end - Agent._timer_start)
def import_file(full_name, path):
    """Load a Python source file as a module object.

    full_name -- the name to give the module.
    path -- filesystem path of the .py file to execute.
    Returns the freshly executed module (not registered in sys.modules).
    """
    from importlib import util

    spec = util.spec_from_file_location(full_name, path)
    module = util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module
mp03process.py | #!/usr/bin/env python
"""mp3process.py: Use multiprocessing.Process
Usage:
mp3process.py
"""
from multiprocessing import Process
import os
def info(title):
    """Print a banner with *title*, the module name, the parent PID and
    this process's PID."""
    print(title)
    print(f'module name: {__name__}')
    print(f'parent process: {os.getppid()}')
    print(f'process id: {os.getpid()}')
def f(name):
    """Child-process entry point: print process info, then greet *name*."""
    info('function f')
    print('hello', name)
def main():
    """Demo: print this process's info, then run f('bob') in a child
    process and wait for it to finish."""
    info('main')
    worker = Process(target=f, args=('bob',))
    worker.start()
    worker.join()
# Standard entry guard so importing this module does not spawn the demo
# process.
if __name__ == '__main__':
    main()
|
reloader.py | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
"""
A file monitor and server restarter.
Use this like:
..code-block:: Python
import reloader
reloader.install()
Then make sure your server is installed with a shell script like::
err=3
while test "$err" -eq 3 ; do
python server.py
err="$?"
done
or is run from this .bat file (if you use Windows)::
@echo off
:repeat
python server.py
if %errorlevel% == 3 goto repeat
or run a monitoring process in Python (``paster serve --reload`` does
this).
Use the ``watch_file(filename)`` function to cause a reload/restart for
other non-Python files (e.g., configuration files). If you have
a dynamic set of files that grows over time you can use something like::
def watch_config_files():
return CONFIG_FILE_CACHE.keys()
paste.reloader.add_file_callback(watch_config_files)
Then every time the reloader polls files it will call
``watch_config_files`` and check all the filenames it returns.
"""
import os
import sys
import time
import threading
import traceback
from paste.util.classinstance import classinstancemethod
def install(poll_interval=1):
    """
    Install the reloading monitor.

    Starts a daemon thread that polls watched files every *poll_interval*
    seconds and exits the process with status 3 when one changes; a
    wrapping shell/batch loop is expected to restart the server.

    NOTE(review): the original docstring described a
    ``raise_keyboard_interrupt`` option that this function does not
    actually accept.
    """
    mon = Monitor(poll_interval=poll_interval)
    t = threading.Thread(target=mon.periodic_reload)
    t.setDaemon(True)  # don't keep the process alive for the monitor
    t.start()
class Monitor(object):
    """Polls watched files and triggers a restart when any of them change.

    (Python 2 code -- py2-only syntax below is preserved as-is.)
    """
    # Class-level registries shared by all Monitor instances; the
    # classinstancemethod helpers below write to these when called on
    # the class rather than an instance.
    instances = []
    global_extra_files = []
    global_file_callbacks = []

    def __init__(self, poll_interval):
        # filename -> first-seen mtime; a later, larger mtime => reload.
        self.module_mtimes = {}
        self.keep_running = True
        self.poll_interval = poll_interval
        self.extra_files = list(self.global_extra_files)
        self.instances.append(self)
        self.file_callbacks = list(self.global_file_callbacks)

    def periodic_reload(self):
        # Poll until a change is detected, then hard-exit with status 3
        # so a wrapping shell loop restarts the server.
        while True:
            if not self.check_reload():
                # use os._exit() here and not sys.exit() since within a
                # thread sys.exit() just closes the given thread and
                # won't kill the process; note os._exit does not call
                # any atexit callbacks, nor does it do finally blocks,
                # flush open files, etc. In otherwords, it is rude.
                os._exit(3)
                break
            time.sleep(self.poll_interval)

    def check_reload(self):
        """Return False if any watched file changed since first seen."""
        # Watched set = explicit extra files + callback-provided files +
        # the source file of every loaded module.
        filenames = list(self.extra_files)
        for file_callback in self.file_callbacks:
            try:
                filenames.extend(file_callback())
            except:
                print >> sys.stderr, "Error calling paste.reloader callback %r:" % file_callback
                traceback.print_exc()
        for module in sys.modules.values():
            try:
                filename = module.__file__
            except (AttributeError, ImportError), exc:
                continue
            if filename is not None:
                filenames.append(filename)
        for filename in filenames:
            try:
                stat = os.stat(filename)
                if stat:
                    mtime = stat.st_mtime
                else:
                    mtime = 0
            except (OSError, IOError):
                continue  # file vanished or is unreadable; skip it
            # For compiled files, also consider the source's mtime
            # (.pyc -> .py, Jython $py.class -> .py).
            if filename.endswith('.pyc') and os.path.exists(filename[:-1]):
                mtime = max(os.stat(filename[:-1]).st_mtime, mtime)
            elif filename.endswith('$py.class') and \
                    os.path.exists(filename[:-9] + '.py'):
                mtime = max(os.stat(filename[:-9] + '.py').st_mtime, mtime)
            if not self.module_mtimes.has_key(filename):
                self.module_mtimes[filename] = mtime
            elif self.module_mtimes[filename] < mtime:
                print >> sys.stderr, (
                    "%s changed; reloading..." % filename)
                return False
        return True

    def watch_file(self, cls, filename):
        """Watch the named file for changes"""
        filename = os.path.abspath(filename)
        # classinstancemethod: when called on the class, self is None ->
        # apply to all instances and future ones; on an instance ->
        # apply to that instance only.
        if self is None:
            for instance in cls.instances:
                instance.watch_file(filename)
            cls.global_extra_files.append(filename)
        else:
            self.extra_files.append(filename)
    watch_file = classinstancemethod(watch_file)

    def add_file_callback(self, cls, callback):
        """Add a callback -- a function that takes no parameters -- that will
        return a list of filenames to watch for changes."""
        if self is None:
            for instance in cls.instances:
                instance.add_file_callback(callback)
            cls.global_file_callbacks.append(callback)
        else:
            self.file_callbacks.append(callback)
    add_file_callback = classinstancemethod(add_file_callback)
if sys.platform.startswith('java'):
    # Jython only: restart the interpreter in-process via the special
    # SystemRestart exception instead of exiting with status 3.
    try:
        from _systemrestart import SystemRestart
    except ImportError:
        pass
    else:
        class JythonMonitor(Monitor):
            """
            Monitor that utilizes Jython's special
            ``_systemrestart.SystemRestart`` exception.

            When raised from the main thread it causes Jython to reload
            the interpreter in the existing Java process (avoiding
            startup time).

            Note that this functionality of Jython is experimental and
            may change in the future.
            """
            def periodic_reload(self):
                while True:
                    if not self.check_reload():
                        raise SystemRestart()
                    time.sleep(self.poll_interval)

# Module-level convenience aliases (dispatch through classinstancemethod).
watch_file = Monitor.watch_file
add_file_callback = Monitor.add_file_callback
|
sync.py | """
@author axiner
@version v1.0.0
@created 2022/5/2 9:34
@abstract
@description
@history
"""
import sys
from multiprocessing import Process, Queue
from toollib import utils
try:
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
except ImportError:
raise
class FileEventHandler(FileSystemEventHandler):
    """Forward watchdog filesystem events into a multiprocessing queue.

    Each event is enqueued as ``((event_name, 'dir'|'file'), payload)``
    where payload carries the source path (and the destination path for
    moves).
    """

    def __init__(self, q):
        super().__init__()
        self.queue = q  # Queue shared with the sync consumer process

    def _enqueue(self, event_name, event, with_dest=False):
        """Normalize *event* and put it on the queue.

        Replaces five verbatim-duplicate handler bodies that recovered
        the event name via sys._getframe(); the name is passed
        explicitly instead.
        """
        _even_type = (event_name, 'dir' if event.is_directory else 'file')
        payload = {'src_path': event.src_path}
        if with_dest:
            payload['dest_path'] = event.dest_path
        self.queue.put((_even_type, payload))

    def on_created(self, event):
        self._enqueue('on_created', event)

    def on_deleted(self, event):
        self._enqueue('on_deleted', event)

    def on_modified(self, event):
        self._enqueue('on_modified', event)

    def on_moved(self, event):
        # Moves carry both source and destination paths.
        self._enqueue('on_moved', event, with_dest=True)

    def on_closed(self, event):
        self._enqueue('on_closed', event)
def monitor(queue, src):
    """Watch *src* recursively and stream filesystem events into *queue*.

    Runs until KeyboardInterrupt, then stops the observer cleanly.
    Intended to run as the producer Process.
    """
    import time  # local import: this module does not import time at top level

    observer = Observer()
    event_handler = FileEventHandler(queue)
    observer.schedule(event_handler, src, recursive=True)
    observer.start()
    try:
        # Fix: the original spun in ``while True: pass``, pinning a CPU
        # core at 100%. The observer does its work on its own thread, so
        # this loop only needs to keep the main thread alive.
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def sync(queue, src, dest, ip, user, port, suffix):
    """Consume filesystem events from *queue* and rsync after each one.

    Runs forever; intended to be the body of the consumer Process.
    Editor swap files (.swp/.swx) are ignored.
    """
    while True:
        event = queue.get()  # blocks until the monitor process emits an event
        if event:
            if not event[1].get('src_path', '').endswith(('.swp', '.swx')):
                sys.stdout.write(f'[tcli][info]{event}\n')
                _rsync(src, dest, ip, user, port, suffix)
def _rsync(src, dest, ip, user, port, suffix):
    """Run rsync from *src* to ``user@ip:dest``, retrying up to 3 times.

    :param suffix: optional suffix appended to the destination path
        (once) as ``dest-suffix``.
    """
    # Fix: compute the destination once. In the original this ran inside
    # the retry loop, so every retry appended the suffix again
    # (dest -> dest-sfx -> dest-sfx-sfx ...).
    if suffix:
        dest = f'{dest}-{suffix}'
    # The command is loop-invariant; build it once.
    if port == 22:
        cmd = f'rsync -avz --delete --exclude={{*.swp,*.swx}} ' \
              f'{src} {user}@{ip}:{dest}'
    else:
        cmd = f'rsync -avz -e "ssh -p {port}" --delete --exclude={{*.swp,*.swx}} ' \
              f'{src} {user}@{ip}:{dest}'
    for _ in range(3):
        try:
            p = utils.syscmd(cmd)
            out, err = p.communicate()
            if out:
                sys.stdout.write(u'[tcli][info]{0}'.format(out.decode('utf-8')))
            if err:
                sys.stderr.write(u'[tcli][error]{0}'.format(err.decode('utf-8')))
            # Fix: the original had no exit on success, so rsync always
            # ran three times per event; the loop is a retry loop.
            return
        except Exception as err:
            sys.stderr.write(f'[tcli][error]{str(err)}\n')
            sys.stdout.write('[tcli][info][重试中]...\n')
def execute(src, dest, ip, user, port, suffix):
    """Entry point: do one full rsync, then watch-and-sync continuously.

    Spawns a monitor process (producer) and a sync process (consumer)
    connected by a Queue. NOTE(review): neither process is joined, so
    this function returns immediately while both keep running.
    """
    sys.stdout.write('[tcli][info]start...(请确保主备服务可免密登录)\n')
    queue = Queue()
    _rsync(src, dest, ip, user, port, suffix)  # initial full sync
    producer = Process(target=monitor, args=(queue, src))
    consumer = Process(target=sync, args=(queue, src, dest, ip, user, port, suffix))
    producer.start()
    consumer.start()
|
tree.py | # CastPodder GUI -- tree code
# Copyright (c) 2005-2006 Scott Grayban and the CastPodder Team
#
# Based on edj's amazing resizable OPMLOutliner code.
# Heavily mutated by Scott.
#
# $Id: tree.py 147 2006-11-07 08:17:03Z sgrayban $
"""
CastPodder is Copyright © 2005-2006 Scott Grayban
Read the file Software_License_Agreement.txt for more info.
"""
__license__ = "Commercial"
from wxPython.wx import *
import types
from xml.sax import make_parser, handler
import wx
import logging
import StringIO
import os.path
import sys
from ipodder import outlines
from ipodder import grabbers
from ipodder import threads
from ipodder import hooks
from ipodder.contrib import urlnorm
from ipodder.contrib import GenericDispatch
import gui
from gui import images
from skin import \
DIRECTORY_LINK_SCANNED,\
DIRECTORY_LINK_UNSCANNED,\
DIRECTORY_LINK_SCANNING
log = logging.getLogger('iPodder.Outliner')
basepath = os.path.abspath(os.path.split(sys.argv[0])[0])
ACTIVE_FEED_SUB_STATES = ['subscribed', 'newly-subscribed', 'preview', 'force']
class TreeNode:
"""A GUI tree node that points to the abstract tree node."""
def __init__(self, parent, node):
"""Initialise the TreeNode.
parent -- the parent TreeNode or, if this node is the root, the
OPMLTree itself.
node -- the outlines.Node underlying this GUI tree node.
"""
self.appending = False
# Check the node is a node.
assert isinstance(node, outlines.Node)
# Handle the parent.
if isinstance(parent, OPMLTree):
opmltree = parent
parent = None
id = opmltree.AddRoot(node.text)
elif isinstance(parent, TreeNode):
opmltree = parent.opmltree
id = opmltree.AppendItem(parent.id, node.text)
else:
raise ValueError, parent
#log.debug("id: %s", repr(id))
opmltree.SetPyData(id, self)
self.opmltree = opmltree
self.node = node
self.parent = parent
self.id = id
self.scanned = False # only useful for links to OPML
self.scanthread = None
node.hooks.add('changed', self.nodechanged)
node.hooks.add('child-changed', self.childnodechanged)
self.nodechanged(node) # forces scan of our abstract node
self.childnodechanged(node, None) # ... and of our children
def nodechanged(self, node):
"""Deal with the underlying abstract node changing."""
self.opmltree.ThreadSafeDispatch(self._nodechanged, node)
def _nodechanged(self, node):
"""Deal with the underlying abstract node changing."""
if self.appending:
self.appending = False
TreeNode(self,node[-1])
return
self.seticon()
for child in node:
TreeNode(self, child)
#AG HACK to expand the Directory root node on startup.
if self.parent == None:
self.opmltree.Expand(self.id)
def childnodechanged(self, node, child):
"""Deal with a child of the underlying abstract node changing."""
self.opmltree.ThreadSafeDispatch(self._childnodechanged, node, child)
def _childnodechanged(self, node, child):
"""Deal with a child of the underlying abstract node changing."""
self.seticon()
def seticon(self, expanding=False):
"""Set our icon."""
# TODO: we need a plain text node icon
opmltree = self.opmltree
id = self.id
node = self.node
image = None
colour = wx.NullColor
if len(node):
image = opmltree.fldridx
opmltree.SetItemHasChildren(id, TRUE)
else:
image = opmltree.textidx
opmltree.SetItemHasChildren(id, FALSE)
if node.type == 'link':
if self.opmltree.IsOpmlUrl(node.url):
image = opmltree.netfldridx
if self.scanned:
colour = DIRECTORY_LINK_SCANNED
if len(node):
opmltree.SetItemHasChildren(id, TRUE)
else:
opmltree.SetItemHasChildren(id, FALSE)
else:
opmltree.SetItemHasChildren(id, TRUE)
colour = DIRECTORY_LINK_UNSCANNED
else:
image = opmltree.remoteidx
if opmltree.feeds is not None:
try:
feed = opmltree.feeds[node.url]
if feed.sub_state in ACTIVE_FEED_SUB_STATES:
image = opmltree.tickidx
else:
image = opmltree.crossidx
except KeyError:
pass
if expanding:
image = opmltree.openidx
colour = DIRECTORY_LINK_SCANNING
if image is not None:
#log.debug("Setting icon image to %s", repr(image))
opmltree.SetItemImage(id, image)
opmltree.SetItemTextColour(id, colour)
opmltree.Refresh()
def scan(self):
    """Ask for our remote component to be scanned.

    No-ops unless this is an unscanned link node pointing at OPML; the
    actual download/parse happens in _scan on a daemon thread.
    """
    log.debug("%s: scan requested.", repr(self))
    if self.scanthread is not None:
        # A scan is already in flight; re-attach to the existing thread.
        log.debug("We already have a scan thread.")
        self.scanthread.catch()
        return
    if self.scanned:
        log.debug("Already scanned.")
        return
    node = self.node
    if not node.type == 'link':
        log.debug("No. We're not a link.")
        return
    if not self.opmltree.IsOpmlUrl(node.url):
        log.debug("No. We don't aim at OPML.")
        return
    self.scanthread = threads.OurThread(target=self._scan)
    self.scanthread.setDaemon(True)
    self.scanthread.start()
def setmessage(self, message=None):
    """Set the tree item's label, optionally suffixed with a status message.

    With no message (or an empty one) the label reverts to the node's text;
    otherwise the message is appended in square brackets.
    """
    label = self.node.text
    if message:
        label = "%s [%s]" % (label, message)
    self.opmltree.SetItemText(self.id, label)
def _scan(self):
    """Asynchronous scan method (runs on the scan thread).

    Downloads the node's OPML, parses it, re-parents the resulting
    children onto our abstract node, then asks the GUI thread to expand
    this item. On any failure the icon/message are reset and the thread
    reference is cleared so a later scan can retry.
    """
    node = self.node
    self.seticon(expanding=True)
    self.setmessage("downloading...")
    sio = StringIO.StringIO()
    grabber = grabbers.BasicGrabber(node.url, sio, state=self.opmltree.state, offline=False)
    # Progress callback updates the item label as the download proceeds.
    grabber.hooks.add('updated', lambda: self.setmessage(
        "downloading %d%%" % (100*grabber.fraction_done)))
    try:
        res = grabber()
    except grabbers.GrabError, ex:
        log.error("Grab failed (%s) for %s", ex.message, node.url)
        self.seticon(expanding=False)
        self.setmessage(ex.message)
        self.scanthread = None
        return
    filename, headers = res
    # filename will be None
    # no point looking at headers, really :)
    opml = sio.getvalue()
    sio.close()
    try:
        node = outlines.Head.fromopml(opml)
        self.update_user_root_title(node)
    except (AssertionError, outlines.xml.sax._exceptions.SAXParseException), ex:
        log.error("Couldn't parse XML or OPML for node: %s", node.url)
        #log.info(node.url)
        #log.info(opml)
        self.seticon(expanding=False)
        self.setmessage("parsing failure")
        self.scanthread = None
        return
    # Detach the parsed children so they can be adopted by our own node.
    orphans = []
    for child in node:
        child.parent = None
        orphans.append(child)
    # Mark scanned before extending, so the resulting change notifications
    # see the final state.
    self.scanned = True
    self.setmessage()
    self.node.extend(orphans)
    self.opmltree.ThreadSafeDispatch(self.opmltree.Expand, self.id)
    self.scanthread = None # time to die! :)
def rescan(self):
    """Throw away previously scanned children (both abstract and in the
    tree control) and scan again from scratch."""
    self.node.DeleteChildren()
    self.opmltree.DeleteChildren(self.id)
    self.scanned = False
    self.scan()
def append(self,node):
    """Append an abstract child node.

    The 'appending' flag tells the subsequent _nodechanged callback to
    create a tree item for just the new child instead of all children.
    """
    self.appending = True
    self.node.append(node)
def update_user_root_title(self,head):
    """New roots may come in with bogus titles. Try to update them here.

    Only applies to user-added directory roots (direct children of the
    tree's root); built-in default roots are never renamed.
    """
    if isinstance(self.parent, TreeNode) and self.parent.parent == None:
        if hasattr(head,"title"):
            from ipodder import configuration
            node = self.node
            # Never rename one of the shipped default directory roots.
            defaults = configuration.configDefaults['podcast_directory_roots']
            for root in defaults:
                if root[0] == node.url:
                    return
            # Replace the stored (url, title) pair with the fetched title.
            actual = self.opmltree.feeds.config.podcast_directory_roots
            for root in actual:
                if root[0] == node.url:
                    actual.remove(root)
                    actual.append((node.url,head.title))
                    self.node.text = head.title
                    return
class OPMLTree(GenericDispatch.GenericDispatchMixin, wxTreeCtrl):
    '''Lazy Tree is a simple "Lazy Evaluation" tree,
    that is, it only adds items to the tree view when
    they are needed.'''
    ISNEW = 1
    def __init__(self, *a, **kw):
        # Two-stage wx creation: the real construction happens later via
        # PostCreate (required for XRC-style instantiation).
        # Stops us from being able to run interactively,
        # but at least works in iPodderGui.
        p = wx.PreTreeCtrl()
        self.PostCreate(p)
        # wxTreeCtrl.__init__(self, *a, **kw)
        # GenericDispatch.GenericDispatchMixin.__init__(self)
        self.hooks = hooks.HookCollection()
    def Init(self, roots, feeds, state):
        """Second-stage initialisation.

        Wires tree events, builds the icon image list, and populates the
        tree from the given (url, title) directory roots.
        """
        self.feeds = feeds
        self.state = state
        EVT_TREE_ITEM_EXPANDING(self, self.GetId(), self.OnExpandNode)
        EVT_TREE_SEL_CHANGED(self, self.GetId(), self.OnSelectNode)
        EVT_TREE_ITEM_ACTIVATED(self, self.GetId(), self.OnActivateNode)
        # OnDispatchEvent comes from GenericDispatchMixin — TODO confirm.
        GenericDispatch.EVT_DISPATCH(self, self.OnDispatchEvent)
        isz = (16,16)
        il = wx.ImageList(isz[0], isz[1])
        def iladd(name):
            # Add a named icon to the image list, falling back to a
            # known-good icon if the icon file is corrupt.
            icon = gui.geticon(name)
            try:
                return il.Add(icon)
            except wx.PyAssertionError, ex:
                log.exception("Failed to add icon %s to image list; "\
                              "it's probably corrupt.", name)
                return il.Add(gui.geticon('smiles')) # probably OK
        self.textidx = iladd('normal file')
        self.fldridx = iladd('folder')
        self.fldropenidx = iladd('file open')
        self.openidx = self.fldropenidx
        self.fileidx = iladd('report view')
        self.smileidx = iladd('smiles')
        self.netfldridx = iladd('netflder')
        self.remoteidx = iladd('remote')
        self.tickidx = iladd('remote-sub')
        self.crossidx = iladd('icon_feed_disabled')
        self.SetImageList(il)
        self.il = il
        self.logpanel = -1
        self.DeleteAllItems()
        self.InitMenus()
        rootNode = outlines.Node('Directory')
        if 0:
            # Disabled: show the user's subscriptions as a directory branch.
            # Add feeds information
            active = []
            inactive = []
            for feed in feeds:
                tup = (str(feed), feed)
                if feed.sub_state in ACTIVE_FEED_SUB_STATES:
                    active.append(tup)
                else:
                    inactive.append(tup)
            active.sort(); inactive.sort()
            subsnode = outlines.Node('My Subscriptions')
            for title, feed in active:
                subsnode.append(outlines.Link(title, url=feed.url))
            rootNode.append(subsnode)
        for url, title in roots:
            rootNode.append(outlines.Link(title, url=url))
        self.root = TreeNode(self, rootNode)
        #AG: Doesn't work. TreeNode invokes the thread-safe dispatcher,
        #which appears to run after this command.
        #self.Expand(self.root.id)
    def InitMenus(self):
        """Initialise the context menus."""
        def addmenu(menu, text, callable):
            """Add an item to the menu"""
            id = wx.NewId()
            menu.Append(id, text)
            wx.EVT_MENU(menu, id, callable)
        nm = self.nodemenu = wx.Menu()
        addmenu(nm, "Subscribe", self.Null)
        addmenu(nm, "Unsubscribe", self.Null)
        dm = self.dirmenu = wx.Menu()
        # NOTE(review): these two items are added to nm, not dm — looks like
        # a copy-paste slip; confirm whether they belong on the dir menu.
        addmenu(nm, "Add shortcut", self.Null)
        addmenu(nm, "Remove shortcut", self.Null)
        self.Bind(wx.EVT_TREE_ITEM_RIGHT_CLICK, self.OnRightClick, self)
    def OnRightClick(self, event):
        """Fire the 'right-click' hook for the clicked item."""
        # TODO:
        # choose a menu
        # customise it
        # pop it up
        ID = event.GetItem()
        treenode = self.GetPyData(ID)
        self.hooks('right-click', self, ID, treenode)
    def Null(self):
        """Do nothing."""
        pass
    def SetLogPanel(self, logpanel):
        """Record the log panel to use (not otherwise used here)."""
        self.logpanel = logpanel
    def OnExpandNode(self, event):
        """Expand a node. This does nothing at the moment."""
        ID = event.GetItem()
        node = self.GetPyData(ID)
        if 0:
            log.warn("Trying to scan the kids...")
            # This doesn't work, I think because the way we import node trees
            # stuffs up the parental relationships.
            for child in node.node: ###
                log.warn("Here's a kid.")
                child.scan()
    def OnSelectNode(self, event):
        """When the user selects a node, scan it."""
        ID = event.GetItem()
        try:
            treenode = self.GetPyData(ID)
        except wx._core.PyAssertionError:
            # Windows doesn't behave well if you right-click a non-selected node.
            event.Skip()
            return
        self.hooks('select-before-scan', self, ID, treenode)
        treenode.scan()
    def OnActivateNode(self, event):
        """When a user double-clicks a node, call a hook."""
        ID = event.GetItem()
        treenode = self.GetPyData(ID)
        self.hooks('node-activated', self, ID, treenode)
    def GetName( self, node ):
        """Display name for a node."""
        return str(node)
    def GetChildren( self, node ):
        """Children of a node: lists/tuples are their own children,
        anything else has none."""
        if type(node) in (types.ListType, types.TupleType):
            return node
        else:
            return []
    def IsOpmlUrl(self,url):
        """True if the URL looks like it points at an OPML document."""
        if url.lower().endswith('.opml'):
            return True
        #If it's in the config directory, assume it's OPML.
        for root in self.feeds.config.podcast_directory_roots:
            if root[0] == url:
                return True
        return False
    def AddDirectoryRoot(self,title,url):
        """Add a new user directory root to the tree and persist it."""
        self.root.append(outlines.Link(title, url=url))
        self.feeds.config.podcast_directory_roots.append((url,title))
class TestApplication (wxPySimpleApp):
    """Stand-alone harness for exercising OPMLTree (see main())."""
    def __init__(self, feeds):
        self.feeds = feeds
        wxPySimpleApp.__init__(self)
    def OnInit(self):
        """Build a frame holding an OPMLTree seeded with well-known
        podcast directory roots."""
        frame = wxFrame (NULL, -1, "test", size = (600,700))
        panel = wxPanel(frame, -1)
        sizer = wxBoxSizer( wxVERTICAL)
        # (url, title) pairs for the directory roots to display.
        roots = [
            ('http://www.ipodder.org/discuss/reader$4.opml',
             'iPodder.org: Podcasting Central'),
            ('http://www.castpodder.net/opml/CastPodder.opml',
             'CastPodder Team Directory'),
            ('http://directory.ipodderx.com/opml/iPodderX_Picks.opml',
             'iPodderX Top Picks'),
            ('http://directory.ipodderx.com/opml/iPodderX_Popular.opml',
             'iPodderX Most Popular'),
            ('http://www.thesportspod.com/opml/sports.opml',
             'TheSportsProd'),
            ('http://www.gigadial.net/public/opml/dial.opml',
             'GigaDial')
        ]
        lazy_tree = OPMLTree(panel)
        lazy_tree.Init(roots, self.feeds, {})
        sizer.Add(lazy_tree, 1, wxEXPAND)
        panel.SetSizer(sizer)
        panel.SetAutoLayout(true)
        frame.Show(1)
        self.SetTopWindow(frame)
        return 1
def main():
    """Stand-alone entry point: build config, state and feeds, then run
    the TestApplication main loop."""
    from ipodder import configuration
    from ipodder import feeds
    import shelve
    import pickle
    logging.basicConfig()
    log = logging.getLogger('iPodder')
    log.setLevel(logging.DEBUG)
    parser = configuration.makeCommandLineParser()
    options, args = parser.parse_args()
    if args:
        parser.error("only need options; no arguments.")
    config = configuration.Configuration(options)
    # Persistent state database shared with the rest of iPodder.
    state = shelve.open(config.state_db_file, 'c',
                        writeback=False, protocol=pickle.HIGHEST_PROTOCOL)
    # NOTE: rebinding 'feeds' here shadows the module imported above.
    feeds = feeds.Feeds(config, state)
    app = TestApplication(feeds)
    app.MainLoop()

if __name__ == "__main__":
    main()
|
energyServer.py | import web
import os
import sys
#import dynamicPopulation
import time
from threading import Thread
#from dynamicPopulation import showDynamicPopulation
import CarCounting.getDOTstream as D
import graphBackend
import staticData.foursquareCheckinData as FS
import staticData.staticTaxiData as TD
import staticData.staticCensusData as CD
import staticData.staticEnergyData as ED
import streamDaemon
import dynamicData.subwayData as SD
import dynamicData.vehicleData as VD
import dynamicData.GPSendpoint as GPSendpoint
import DBMgr
import dynamicData.loadBuildingData as LBD
import dynamicData.getFootprint as F
# Shared singletons created at import time.
db = DBMgr.DBMgr()
# Warms the DB with a specific building ID — presumably a cache/connection
# warm-up; TODO confirm intent.
db.pullCoordinates("45458C82-9CE4-412F-8BD7-0D45CA175508")
#S = streamDaemon.streams()
LBuildings = LBD.loadBuildings()
#Stream = streamDaemon.S
print("Assigned stream")
# web.py routing table: URL path -> handler class (or a string name
# resolved against globals()).
urls = (
    #"/(.*)", 'Service',
    # "/realtime", dynamicPopulation.doPopulation,
    "/footprint", F.footprint,
    "/camera", D.DOTstream,
    "/foursquareData", FS.foursquareData,
    "/taxiData", TD.taxiData,
    "/censusData", CD.censusData,
    "/energyData", ED.energyData,
    "/graph", graphBackend.G,
    "/subway", SD.subwayData,
    "/vehicle", VD.vehicleData,
    "/GPSdata", GPSendpoint.GPSreport,
    "/", "baseURL"
)
#initialization = dynamicPopulation.showDynamicPopulation()
#initialization.startup()
class baseURL:
    """Trivial handler for '/': answers every GET/POST with a 200 OK body."""
    _OK = "200 OK"

    def GET(self):
        return self._OK

    def POST(self):
        return self._OK
class Service:
    """CORS-enabled echo handler (its '/(.*)' mapping is commented out in
    the `urls` table above)."""

    @staticmethod
    def _allow_cors():
        # Both headers are required for credentialed cross-origin requests.
        web.header('Access-Control-Allow-Origin', '*')
        web.header('Access-Control-Allow-Credentials', 'true')

    def GET(self, name):
        self._allow_cors()
        return {'message': 'GET OK!'}

    def POST(self, name):
        self._allow_cors()
        data = web.data()
        return {'message': "POST OK! %s" % data}
class MyApplication(web.application):
    """web.py application that also drives the periodic snapshot loop."""
    def run(self, port=8080, *middleware):
        # NOTE(review): runDynamicPopulation() loops forever, so the HTTP
        # server below is never started unless that loop is interrupted.
        # Confirm whether startDaemon() was meant to be called instead.
        self.runDynamicPopulation()
        #self.runTrafficCount()
        #self.startDaemon()
        func = self.wsgifunc(*middleware)
        return web.httpserver.runsimple(func, ('0.0.0.0', port))
    #def runTrafficCount(self):
    def startDaemon(self):
        # Run the snapshot loop on a background daemon thread so it dies
        # with the process.
        t=Thread(target=self.runDynamicPopulation, args=())
        t.setDaemon(True)
        t.start()
    def runDynamicPopulation(self):
        """Every 30 seconds, snapshot energy/population state for the
        loaded buildings.

        NOTE(review): `S` is undefined in this scope — its module-level
        assignment (`S = streamDaemon.streams()`) is commented out above,
        so the S.dynamicChanges / S.clearList() calls below will raise
        NameError at runtime. Confirm before enabling this path.
        """
        #run1 = dynamicPopulation.showDynamicPopulation(1)
        while True:
            print("\n\nRunning dynamic\n\n")
            #print("Number of building changes" + str(len(energyServer.S.buildingChangesList)))
            # for (borough, block, lot, diff) in S.buildingChangesList:
            #     print(borough)
            #     print(block)
            #     print(lot)
            #     break
            #convert borough block lot to BBL
            print("#################SAVING SNAPSHOT##################")
            start = time.time()
            energyDictionary = db.energyDictionary(LBuildings.model, LBuildings.buildingParams, LBuildings.totals, LBuildings.referenceModels)
            LBuildings.loadBuildingChanges(S.dynamicChanges)
            populationDictionary = LBuildings.BBLpopulation
            # sys.getsizeof is shallow — these sizes undercount nested data.
            print("Energy Dictionary: " + str(sys.getsizeof(energyDictionary)))
            print("Population Dictionary: " + str(sys.getsizeof(populationDictionary)))
            #db.recordFullState(energyDictionary, populationDictionary)
            #V.vehicleCountFromImage()
            S.clearList()
            end = time.time()
            print("Finished: " + str(end-start) + " s")
            print("#################SAVED SNAPSHOT#################")
            #run1.getBlocks2Occupancy(20)
            #run1.startup()
            #run1.plotBuildings()
            #run1.plotRealtime()
            time.sleep(30)
        return
def notfound():
    """404 handler installed on the application in run()."""
    return web.notfound("404 not found")
def run():
    """Create the web.py application and serve it on port 8001."""
    app = MyApplication(urls, globals())
    app.notfound = notfound
    app.run(port=8001)
remind.py | # coding=utf-8
"""
remind.py - Sopel Reminder Module
Copyright 2011, Sean B. Palmer, inamidst.com
Licensed under the Eiffel Forum License 2.
http://sopel.chat
"""
import os
import re
import time
import threading
import collections
import codecs
from datetime import datetime
from sopel.module import commands, example, NOLIMIT
import sopel.tools
from sopel.tools.time import get_timezone, format_time
# pytz is optional: without it, only UTC times are supported (see at()).
try:
    import pytz
except ImportError:
    # Was a bare `except:`, which would also have hidden unrelated errors
    # (e.g. KeyboardInterrupt); only a missing module is expected here.
    pytz = None
def filename(self):
    """Return the path of this bot's reminder database file.

    The name combines the bot's nick and configured host so several bot
    instances can share one home directory without collisions.
    """
    basename = '%s-%s.reminders.db' % (self.nick, self.config.core.host)
    return os.path.join(self.config.core.homedir, basename)
def load_database(name):
    """Load the reminder database from *name*.

    Returns a dict mapping int unix timestamps to lists of
    (channel, nick, message) tuples. A missing file yields an empty dict.
    """
    data = {}
    if os.path.isfile(name):
        # File layout (written by dump_database): one reminder per line,
        # unixtime<TAB>channel<TAB>nick<TAB>message
        # `with` guarantees the handle is closed even if a line is malformed.
        with codecs.open(name, 'r', encoding='utf-8') as f:
            for line in f:
                # maxsplit=3 keeps any tabs inside the message text intact;
                # the old unbounded split raised ValueError on such lines.
                unixtime, channel, nick, message = line.split('\t', 3)
                message = message.rstrip('\n')
                # Timestamps may have been written as floats; normalize the
                # key to int so lookups in monitor()/create_reminder() match.
                t = int(float(unixtime))
                reminder = (channel, nick, message)
                data.setdefault(t, []).append(reminder)
    return data
def dump_database(name, data):
    """Write the reminder dict *data* to *name*, replacing the file.

    One tab-separated line per reminder (see load_database for the layout).
    """
    # `with` closes the file even if a write fails (the old code leaked the
    # handle on error). dict.items() works on both Python 2 and 3 and
    # replaces the sopel-only iteritems() compatibility helper.
    with codecs.open(name, 'w', encoding='utf-8') as f:
        for unixtime, reminders in data.items():
            for channel, nick, message in reminders:
                f.write('%s\t%s\t%s\t%s\n' % (unixtime, channel, nick, message))
def setup(bot):
    """Load the reminder DB onto the bot and start the monitor thread."""
    bot.rfn = filename(bot)
    bot.rdb = load_database(bot.rfn)

    def monitor(bot):
        """Poll the DB every 2.5s and deliver any reminders that are due."""
        # Give the bot a moment to finish connecting before messaging.
        time.sleep(5)
        while True:
            now = int(time.time())
            unixtimes = [int(key) for key in bot.rdb]
            oldtimes = [t for t in unixtimes if t <= now]
            if oldtimes:
                for oldtime in oldtimes:
                    for (channel, nick, message) in bot.rdb[oldtime]:
                        # An empty message still pings the user.
                        if message:
                            bot.msg(channel, nick + ': ' + message)
                        else:
                            bot.msg(channel, nick + '!')
                    del bot.rdb[oldtime]
                # Persist after delivering so restarts don't re-send.
                dump_database(bot.rfn, bot.rdb)
            time.sleep(2.5)

    targs = (bot,)
    # NOTE(review): this thread is not marked as a daemon, so it can keep
    # the process alive on shutdown — confirm whether that is intended.
    t = threading.Thread(target=monitor, args=targs)
    t.start()
# Unit-name -> seconds multipliers used when parsing durations like "3h45m".
# Kept in an OrderedDict so longer unit names are tried before their
# prefixes (e.g. 'months' before 'mo' before 'm') when building the
# alternation below. A "month" here is 29.53059 days.
scaling = collections.OrderedDict([
    ('years', 365.25 * 24 * 3600),
    ('year', 365.25 * 24 * 3600),
    ('yrs', 365.25 * 24 * 3600),
    ('y', 365.25 * 24 * 3600),
    ('months', 29.53059 * 24 * 3600),
    ('month', 29.53059 * 24 * 3600),
    ('mo', 29.53059 * 24 * 3600),
    ('weeks', 7 * 24 * 3600),
    ('week', 7 * 24 * 3600),
    ('wks', 7 * 24 * 3600),
    ('wk', 7 * 24 * 3600),
    ('w', 7 * 24 * 3600),
    ('days', 24 * 3600),
    ('day', 24 * 3600),
    ('d', 24 * 3600),
    ('hours', 3600),
    ('hour', 3600),
    ('hrs', 3600),
    ('hr', 3600),
    ('h', 3600),
    ('minutes', 60),
    ('minute', 60),
    ('mins', 60),
    ('min', 60),
    ('m', 60),
    ('seconds', 1),
    ('second', 1),
    ('secs', 1),
    ('sec', 1),
    ('s', 1),
])

# Regex alternation of every recognised unit name, embedded in the
# duration-parsing patterns in remind() below.
periods = '|'.join(list(scaling.keys()))
@commands('in')
@example('.in 3h45m Go to class')
def remind(bot, trigger):
    """Gives you a reminder in the given amount of time."""
    if not trigger.group(2):
        bot.say("Missing arguments for reminder command.")
        return NOLIMIT
    if trigger.group(3) and not trigger.group(4):
        bot.say("No message given for reminder.")
        return NOLIMIT
    duration = 0
    # Split the input into alternating duration tokens ("3h", "45 min", ...)
    # and the reminder text. The (?i) flag now sits at the start of the
    # pattern: a global inline flag in mid-pattern is an error on
    # Python 3.11+ (and was deprecated since 3.6). It only affects the
    # unit-name alternation in practice, as the rest of the pattern has no
    # cased characters.
    message = [_f for _f in re.split(r'(?i)(\d+(?:\.\d+)? ?(?:' + periods + r')) ?',
                                     trigger.group(2))[1:] if _f]
    reminder = ''
    stop = False
    for piece in message:
        grp = re.match(r'(\d+(?:\.\d+)?) ?(.*) ?', piece)
        if grp and not stop:
            length = float(grp.group(1))
            # Unrecognised units fall back to minutes.
            factor = scaling.get(grp.group(2).lower(), 60)
            duration += length * factor
        else:
            # First non-duration piece starts the reminder text; everything
            # after it is text too.
            reminder = reminder + piece
            stop = True
    if duration == 0:
        return bot.reply("Sorry, didn't understand the input.")
    if duration % 1:
        # Round fractional seconds up so we never fire early.
        duration = int(duration) + 1
    else:
        duration = int(duration)
    timezone = get_timezone(
        bot.db, bot.config, None, trigger.nick, trigger.sender)
    create_reminder(bot, trigger, duration, reminder, timezone)
@commands('at')
@example('.at 13:47 Do your homework!')
def at(bot, trigger):
    """
    Gives you a reminder at the given time. Takes hh:mm:ssTimezone
    message. Timezone is any timezone Sopel takes elsewhere; the best choices
    are those from the tzdb; a list of valid options is available at
    http://sopel.chat/tz . The seconds and timezone are optional.
    """
    if not trigger.group(2):
        bot.say("No arguments given for reminder command.")
        return NOLIMIT
    if trigger.group(3) and not trigger.group(4):
        bot.say("No message given for reminder.")
        return NOLIMIT
    # hh:mm[:ss][tz] message
    regex = re.compile(r'(\d+):(\d+)(?::(\d+))?([^\s\d]+)? (.*)')
    match = regex.match(trigger.group(2))
    if not match:
        bot.reply("Sorry, but I didn't understand your input.")
        return NOLIMIT
    hour, minute, second, tz, message = match.groups()
    if not second:
        second = '0'
    if pytz:
        timezone = get_timezone(bot.db, bot.config, tz,
                                trigger.nick, trigger.sender)
        if not timezone:
            timezone = 'UTC'
        now = datetime.now(pytz.timezone(timezone))
        at_time = datetime(now.year, now.month, now.day,
                           int(hour), int(minute), int(second),
                           tzinfo=now.tzinfo)
        timediff = at_time - now
    else:
        # Without pytz we can only handle naive/UTC times.
        if tz and tz.upper() != 'UTC':
            # Fixed user-facing typo: was "timzeone".
            bot.reply("I don't have timezone support installed.")
            return NOLIMIT
        now = datetime.now()
        at_time = datetime(now.year, now.month, now.day,
                           int(hour), int(minute), int(second))
        timediff = at_time - now
    # timedelta.seconds is always in [0, 86399]: for a time earlier today
    # the delta has days == -1, so .seconds already wraps to "tomorrow".
    # NOTE(review): that makes the < 0 branch below unreachable — confirm.
    duration = timediff.seconds
    if duration < 0:
        duration += 86400
    create_reminder(bot, trigger, duration, message, 'UTC')
def create_reminder(bot, trigger, duration, message, tz):
    """Store a reminder *duration* seconds from now and confirm to the user."""
    due = int(time.time()) + duration
    entry = (trigger.sender, trigger.nick, message)
    # Group reminders by due timestamp, creating the bucket on first use.
    bot.rdb.setdefault(due, []).append(entry)
    dump_database(bot.rfn, bot.rdb)
    if duration < 60:
        bot.reply('Okay, will remind in %s secs' % duration)
    else:
        # For longer waits, echo the absolute time in the user's timezone.
        remind_at = datetime.utcfromtimestamp(due)
        timef = format_time(bot.db, bot.config, tz, trigger.nick,
                            trigger.sender, remind_at)
        bot.reply('Okay, will remind at %s' % timef)
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from urllib.error import URLError
from urllib.request import urlopen
import colorama
import dateutil.parser
import requests
import yaml
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._client_factory import (
cf_agent_pools,
cf_container_registry_service,
cf_container_services,
get_auth_management_client,
get_graph_rbac_management_client,
get_resource_by_name,
)
from azure.cli.command_modules.acs._consts import (
ADDONS,
CONST_ACC_SGX_QUOTE_HELPER_ENABLED,
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME,
CONST_CANIPULL_IMAGE,
CONST_CONFCOM_ADDON_NAME,
CONST_INGRESS_APPGW_ADDON_NAME,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID,
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME,
CONST_INGRESS_APPGW_SUBNET_CIDR,
CONST_INGRESS_APPGW_SUBNET_ID,
CONST_INGRESS_APPGW_WATCH_NAMESPACE,
CONST_KUBE_DASHBOARD_ADDON_NAME,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE,
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID,
CONST_MONITORING_ADDON_NAME,
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID,
CONST_MONITORING_USING_AAD_MSI_AUTH,
CONST_NODEPOOL_MODE_USER,
CONST_OPEN_SERVICE_MESH_ADDON_NAME,
CONST_ROTATION_POLL_INTERVAL,
CONST_SCALE_DOWN_MODE_DELETE,
CONST_SCALE_SET_PRIORITY_REGULAR,
CONST_SECRET_ROTATION_ENABLED,
CONST_SPOT_EVICTION_POLICY_DELETE,
CONST_VIRTUAL_NODE_ADDON_NAME,
CONST_VIRTUAL_NODE_SUBNET_NAME,
DecoratorEarlyExitException,
)
from azure.cli.command_modules.acs._helpers import get_snapshot_by_snapshot_id
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.command_modules.acs._resourcegroup import get_rg_location
from azure.cli.command_modules.acs._validators import extract_comma_separated_string
from azure.cli.command_modules.acs.addonconfiguration import (
add_ingress_appgw_addon_role_assignment,
add_monitoring_role_assignment,
add_virtual_node_role_assignment,
ensure_container_insights_for_monitoring,
ensure_default_log_analytics_workspace_for_monitoring,
)
from azure.cli.core._profile import Profile
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (
ArgumentUsageError,
AzureInternalError,
FileOperationError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ResourceNotFoundError,
UnauthorizedError,
ValidationError,
)
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.profiles import ResourceType
from azure.cli.core.util import get_file_json, in_cloud_console, sdk_no_wait, shell_safe_json_parse
from azure.graphrbac.models import (
ApplicationCreateParameters,
ApplicationUpdateParameters,
GetObjectsParameters,
KeyCredential,
PasswordCredential,
RequiredResourceAccess,
ResourceAccess,
ServicePrincipalCreateParameters,
)
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.prompting import NoTTYException, prompt_y_n
from knack.util import CLIError
from msrestazure.azure_exceptions import CloudError
logger = get_logger(__name__)
# pylint: disable=unused-argument
def which(binary):
    """Return the full path of *binary* found on PATH, or None.

    On Windows the '.exe' suffix is appended before searching, matching the
    original behavior.
    """
    # Guard against an unset PATH (os.getenv would return None and the old
    # code would crash on .split()).
    path_var = os.getenv('PATH') or ''
    if platform.system() == 'Windows':
        binary = binary + '.exe'
    # os.pathsep is ';' on Windows and ':' elsewhere, replacing the
    # hand-rolled per-platform split.
    for part in path_var.split(os.pathsep):
        bin_path = os.path.join(part, binary)
        if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
            return bin_path
    return None
def get_cmd_test_hook_data(filename):
    """Load optional test-hook JSON shipped under tests/latest/data.

    Returns the parsed JSON object, or None when the file does not exist.
    """
    module_dir = os.path.dirname(os.path.realpath(__file__))
    hook_path = os.path.join(module_dir, 'tests/latest/data', filename)
    if not os.path.exists(hook_path):
        return None
    with open(hook_path, "r") as f:
        return json.load(f)
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            time.sleep(1)
        else:
            # URL answered: stop polling. The original `break` sat outside
            # the try/except, so the loop always exited after the first
            # attempt and failed probes were never retried.
            break
    # Open the browser regardless — after ~9 failed probes we give up
    # waiting but still try the URL.
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # `args` must be a sequence of positional arguments; the original passed
    # a set literal {url}, which only worked by accident because it had a
    # single element (and would misbehave for any iterable-ordering reason).
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator
    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    # Look up the cluster once, then dispatch on its orchestrator type.
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(
        cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch browse to the Kubernetes or DC/OS implementation based on
    the cluster's orchestrator type; raise for anything else."""
    from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    # Accept either the enum value, its string form, or a custom profile
    # declaring kubernetes.
    if str(orchestrator_type).lower() == 'kubernetes' or \
            orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
            (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'):  # pylint: disable=no-member
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError(
        'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Fetch fresh cluster credentials, then run `kubectl proxy` and
    (optionally) open the dashboard URL in a browser.

    Blocks until the user interrupts the kubectl proxy process.
    """
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # Always regenerate the browse kubeconfig so stale credentials are
    # not reused.
    browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    if os.path.exists(browse_path):
        os.remove(browse_path)
    _k8s_get_credentials_internal(
        name, acs_info, browse_path, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        # Opens the URL from a background thread once the proxy answers.
        wait_then_open_async('http://127.0.0.1:8001/ui')
    subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """Start the octarine proxy on the cluster over SSH, route local HTTP
    traffic through it, and open the dashboard.

    Blocks inside create_tunnel; the HTTP proxy setting is always undone
    on exit via the finally block.
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(
            _get_host_name(acs_info)))
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError(
            'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Random ID lets client and server sides of octarine find each other.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always restore direct HTTP access, even on Ctrl+C.
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos)."""
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    kwargs = {'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    # Dispatch table instead of an if/elif chain.
    installers = {
        'kubernetes': k8s_install_cli,
        'dcos': dcos_install_cli,
    }
    installer = installers.get(orchestrator_type)
    if installer is None:
        raise CLIError(
            'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
    return installer(**kwargs)
def _ssl_context():
    """Build the SSL context used for downloads.

    Uses the platform default context on modern Pythons; falls back to a
    bare TLS context on very old runtimes or Cloud Shell on Windows.
    """
    modern = sys.version_info >= (3, 4)
    if modern and not (in_cloud_console() and platform.system() == 'Windows'):
        return ssl.create_default_context()
    try:
        # PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* and write the body to *filename* (binary).

    The response is read fully into memory before writing — acceptable for
    the small CLI binaries this is used for.
    """
    req = urlopen(url, context=_ssl_context())
    with open(filename, "wb") as f:
        f.write(req.read())
def _unzip(src, dest):
    """Extract zip archive *src* into directory *dest*.

    Raises CLIError on platforms other than Linux/macOS/Windows.
    """
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere

    :param install_location: Full path (including filename) to write the binary to.
    :param client_version: DC/OS CLI version to download (default '1.8').
    :raises CLIError: if no install location is given, the platform is
        unsupported, or the download fails.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # Fixed error message: this branch previously raised the copy-pasted
        # "Proxy server (...) does not exist on the cluster." text.
        raise CLIError(
            'The current system ({}) is not supported.'.format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Make the downloaded binary executable for user, group and others.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin (see the two helpers for details)."""
    k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
    k8s_install_kubelogin(cmd, kubelogin_version,
                          kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: Release to download ('latest' resolves via the
        release channel's stable.txt), without the leading 'v'.
    :param install_location: Full path (including filename) to write the binary to.
    :param source_url: Alternate release mirror; defaults to the public
        Kubernetes release bucket (or the Azure China mirror for that cloud).
    :raises CLIError: on unsupported platforms or download failure.
    """
    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # Fixed error message: this branch previously raised the copy-pasted
        # "Proxy server (...) does not exist on the cluster." text.
        raise CLIError(
            'The current system ({}) is not supported.'.format(system))
    logger.warning('Downloading client to "%s" from "%s"',
                   install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Make the downloaded binary executable for user, group and others.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           '    1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           '    2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    :param client_version: kubelogin release to install, or 'latest' to resolve
        the newest tag from the release metadata endpoint.
    :param install_location: full destination path (directory + file name).
    :param source_url: override for the download base URL; defaults to the
        GitHub releases feed, or the Azure China mirror on AzureChinaCloud.
    :raises CLIError: on download failure or an unsupported platform.
    """
    cloud_name = cmd.cli_ctx.cloud.name
    if not source_url:
        source_url = 'https://github.com/Azure/kubelogin/releases/download'
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'

    if client_version == 'latest':
        # resolve 'latest' to a concrete release tag before building the URL
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version

    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)

    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)

    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        if platform.machine() == 'arm64':
            sub_dir, binary_name = 'darwin_arm64', 'kubelogin'
        else:
            sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # BUGFIX: the previous message ("Proxy server ({}) does not exist on the
        # cluster.") was copy-pasted from an unrelated command and did not
        # describe this failure at all.
        raise CLIError(
            'The {} platform is not currently supported.'.format(system))

    # download + unzip into a temp dir, then move only the binary into place
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"',
                           download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError(
                'Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    # mark the installed binary executable for owner/group/other
    os.chmod(install_location, os.stat(install_location).st_mode |
             stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and its service principal for the cluster.

    Returns a tuple ``(service_principal_app_id, aad_session_key)``; the first
    element is ``False`` when the service principal could not be created after
    all retries.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # BUGFIX: keyword was misspelled 'messsage', which would raise TypeError
    # on progress controllers with a keyword-only 'message' parameter.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD propagation can lag behind the application create; retry with backoff
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # loop exhausted without a successful create
        return False, aad_session_key
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cmd, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign *role* to a principal, retrying while AAD data propagates.

    Returns True on success (including when the assignment already exists),
    False once all retries are exhausted.
    """
    progress = cmd.cli_ctx.get_progress_controller(True)
    progress.add(message='Waiting for AAD role to propagate',
                 value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    assigned = False
    for attempt in range(10):
        progress.add(message='Waiting for AAD role to propagate',
                     value=0.1 * attempt, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(
                cmd, role, service_principal_msi_id, is_service_principal, scope=scope)
            assigned = True
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                assigned = True
            else:
                logger.info(ex.message)
        except CLIError as ex:
            logger.warning(str(ex))
        except Exception as ex:  # pylint: disable=broad-except
            logger.error(str(ex))
        if assigned:
            break
        # linear backoff before the next attempt
        time.sleep(delay + delay * attempt)
    else:
        return False
    progress.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None, is_service_principal=True):
    """Delete role assignments, either by explicit ids or by search filters.

    When called without any filter the operation would delete every assignment
    in the subscription, so the user is prompted unless *yes* is truthy.

    :raises CLIError: when ids are combined with other filter parameters.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # BUGFIX: 'assignee' was listed twice in this guard; 'ids' is also dropped
    # since it is always empty at this point (handled and returned above).
    if not any([assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False, is_service_principal=is_service_principal)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None, is_service_principal=True):
    """Delete a role assignment, retrying while AAD data propagates.

    Returns True on success, False when all retries are exhausted. A CLIError
    is treated as fatal and propagates to the caller immediately.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope,
                                    is_service_principal=is_service_principal)
            break
        except CLIError:
            # BUGFIX: bare 'raise' re-raises with the original traceback;
            # the previous 'raise ex' rewrote the stack trace.
            raise
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups,
                             is_service_principal=True):
    """List role assignments filtered by scope, assignee and/or role.

    :param include_inherited: also keep assignments whose scope is a parent of
        the requested *scope* (matched client-side).
    :param include_groups: use the server-side "assignedTo" filter so
        assignments granted via group membership are included; only honored
        when filtering by assignee without a scope.
    """
    assignee_object_id = None
    if assignee:
        if is_service_principal:
            # resolve a service principal name/app id to its AAD object id
            assignee_object_id = _resolve_object_id(cli_ctx, assignee)
        else:
            assignee_object_id = assignee
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # client-side scope filter; note the and/or precedence: keep when no
        # scope was requested, OR (include_inherited AND the assignment scope
        # regex-matches the start of the requested scope — presumably a
        # parent-scope prefix match), OR the scopes are equal ignoring case
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
    if role:
        role_id = _resolve_role_id(role, scope, definitions_client)
        assignments = [i for i in assignments if _get_role_property(
            i, 'role_definition_id') == role_id]
    if assignee_object_id:
        assignments = [i for i in assignments if _get_role_property(
            i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the Azure regions where ACS is available, grouped by status."""
    locations = {}
    locations["productionRegions"] = regions_in_prod
    locations["previewRegions"] = regions_in_preview
    return locations
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(
default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Build the agentPoolProfiles list of the container service resource."""
    defaults = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # the 2017-07-01 API accepts a richer agent pool profile
        defaults = _update_dict(defaults, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(defaults, {"name": "agentpool0"})]
    # override agentPoolProfiles by using the passed in agent_profiles;
    # honour a user-specified dnsPrefix, otherwise derive one from the pool
    # index to avoid duplicate DNS names
    profiles = []
    for idx, user_profile in enumerate(agent_profiles):
        merged = _update_dict(
            {"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, user_profile)
        profiles.append(_update_dict(defaults, merged))
    return profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
    is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
    The concatenation of the domain name and the regionalized DNS zone
    make up the fully qualified domain name associated with the public
    IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
    public key string. Your key should include three parts, for example
    'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
    in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
    Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
    applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
    <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The adminstration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
    deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
    instance that returns :class:`DeploymentExtended
    <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
    if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    # validate inputs before doing any remote work
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError(
            'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)
    rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    # Kubernetes needs a service principal; --windows is only valid for Kubernetes
    if orchestrator_type.lower() == 'kubernetes':
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(
        windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # wrap the container service resource in a single-resource ARM template
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    # the client secret is passed as a secureString template parameter so it
    # never appears in the template body itself
    if service_principal is not None and client_secret is not None:
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # only these messages indicate transient SPN propagation issues
            if 'is not valid according to the validation procedure' in ex.message or \
                    'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
                    'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist service principal credentials for *subscription_id*.

    The on-disk file (created with mode 0600, since it holds a secret) maps
    subscription ids to ``{client_secret, service_principal}`` objects.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal
    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry
    fd = os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600)
    with os.fdopen(fd, 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Parse the stored service principal file at *config_path*.

    Best-effort: returns None when the file is missing, unreadable, or not
    valid JSON, matching the previous behavior.
    """
    # ROBUSTNESS: open inside the try so a file removed between an
    # exists-check and the open (TOCTOU) can't raise, and no raw file
    # descriptor can leak if wrapping it fails.
    try:
        with open(config_path) as config_file:
            return shell_safe_json_parse(config_file.read())
    except Exception:  # pylint: disable=broad-except
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Deploy — or, when *validate* is set, only validate — an ARM template."""
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    Deployment = cmd.get_models(
        'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=DeploymentProperties(
        template=template, parameters=parameters, mode='incremental'))
    deployments_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                                 subscription_id=subscription_id).deployments
    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        # newer resource API versions expose validation as a long-running op
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            poller = deployments_client.begin_validate(
                resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        return deployments_client.validate(resource_group_name, deployment_name, deployment)
    return sdk_no_wait(no_wait, deployments_client.begin_create_or_update,
                       resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master.

    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(
        name, cluster_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig from the cluster master via SSH and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))

    # pick a non-existing scratch target next to *path* for the raw download
    path_candidate = path
    attempt = 0
    while os.path.exists(path_candidate):
        attempt += 1
        path_candidate = '{}-{}-{}'.format(path, name, attempt)

    # TODO: this only works for public cloud, need other casing for national clouds
    acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
                           '.kube/config', path_candidate, key_filename=ssh_key_file)

    # merge things
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(
                path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning(
                'Failed to merge credentials to kube config file: %s', exc)
            logger.warning(
                'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Load *filename* as YAML; raise CLIError for a missing or unparsable file."""
    try:
        with open(filename) as stream:
            return yaml.safe_load(stream)
    except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
        raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
    except (IOError, OSError) as ex:
        # only a missing file gets the friendly message; other OS errors propagate
        if getattr(ex, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig *addition_file* into *existing_file*.

    Optionally renames the added context to *context_name*; admin contexts
    (user starting with 'clusterAdmin') get an '-admin' suffix so they don't
    shadow the user context. Warns when the resulting file is not chmod 600.

    :raises CLIError: when the addition file cannot be loaded.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # BUGFIX: validate before dereferencing — previously a None addition
    # crashed with TypeError on addition['contexts'] before this check ran
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))

    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    logger.warning(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """
    Gets the ContainerService object from Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    services_client = cf_container_services(cli_ctx, None)
    return services_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an existing ACS cluster to *new_agent_count*."""
    from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
    acs = client.get(resource_group_name, container_service_name)
    acs.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # null out the service principal because otherwise validation complains
    if acs.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        acs.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    acs.windows_profile = None
    return client.begin_create_or_update(resource_group_name, container_service_name, acs)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    if resource_group_name:
        services = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        services = client.list()
    return list(services)
def show_service_principal(client, identifier):
    """Resolve *identifier* to an AAD object id and fetch the service principal."""
    resolved_id = _resolve_service_principal(client, identifier)
    return client.get(resolved_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Create an AAD application; returns (application, aad_session_key).

    :raises CLIError: with setup guidance when the signed-in user lacks
        directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    pwd_creds, cert_creds = _build_application_creds(password, key_value, key_type,
                                                     key_usage, start_date, end_date)
    create_params = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                display_name=display_name,
                                                identifier_uris=identifier_uris,
                                                homepage=homepage,
                                                reply_urls=reply_urls,
                                                key_credentials=cert_creds,
                                                password_credentials=pwd_creds,
                                                required_resource_access=required_resource_accesses)
    try:
        result = client.create(create_params, raw=True)
        # the session key header is needed by callers for consistency tokens
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and/or reply URLs of an existing AAD application.

    Only the supplied pieces are patched (key credentials, password
    credentials, reply URLs); display_name/homepage/identifier_uris are
    accepted for signature parity with create_application but are not
    sent in any of the update calls below. Raises CLIError with guidance
    when the user lacks directory write permission.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(
                reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build the (password_credentials, key_credentials) pair for an AAD app.

    Exactly one of *password* / *key_value* may be supplied; the unused
    list is returned as None. Date arguments accept datetimes or ISO
    strings and default to now / now + 1 year.
    """
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')

    # Normalize the validity window.
    if start_date:
        if isinstance(start_date, str):
            start_date = dateutil.parser.parse(start_date)
    else:
        start_date = datetime.datetime.utcnow()
    if end_date:
        if isinstance(end_date, str):
            end_date = dateutil.parser.parse(end_date)
    else:
        end_date = start_date + relativedelta(years=1)

    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'

    creds_password = None
    creds_key = None
    if password:
        creds_password = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        creds_key = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (creds_password, creds_key)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an application.

    With resolve_app=True (the default) *identifier* is tried as an app id
    GUID, then as an identifier URI, then as an application object id;
    with resolve_app=False it is used as the app id verbatim.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            matches = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            # Not a GUID: treat the identifier as an identifier URI.
            matches = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not matches:  # assume we get an object id
            matches = [rbac_client.applications.get(identifier)]
        app_id = matches[0].app_id

    return rbac_client.service_principals.create(
        ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cmd, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Grant *role* to *assignee*; resolve the assignee in graph only when it is a service principal."""
    return _create_role_assignment(cmd, role, assignee, resource_group_name, scope,
                                   resolve_assignee=is_service_principal)
def _create_role_assignment(cmd, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create an RBAC role assignment at the computed scope.

    *role* may be a role name or a role-definition GUID; *assignee* is
    either a service principal identifier (resolved via graph when
    resolve_assignee=True) or an object id used as-is (MSI case).
    Graph resolution failures are wrapped in CLIError with the HTTP
    status/reason embedded in the message.
    """
    from azure.cli.core.profiles import get_sdk
    factory = get_auth_management_client(cmd.cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    object_id = assignee
    if resolve_assignee:
        from azure.graphrbac.models import GraphErrorException
        error_msg = "Failed to resolve service principal object ID: "
        try:
            object_id = _resolve_object_id(cmd.cli_ctx, assignee)
        except GraphErrorException as ex:
            # Fold status code / reason from the HTTP response into the message.
            if ex.response is not None:
                error_code = getattr(ex.response, "status_code", None)
                error_reason = getattr(ex.response, "reason", None)
                internal_error = ""
                if error_code:
                    internal_error += str(error_code)
                if error_reason:
                    if internal_error:
                        internal_error += " - "
                    internal_error += str(error_reason)
                if internal_error:
                    error_msg += "({}) ".format(internal_error)
                error_msg += ex.message
            # this should be UserFault or ServiceError, but it is meaningless to distinguish them here
            raise CLIError(error_msg)
        except Exception as ex:  # pylint: disable=broad-except
            raise CLIError(error_msg + str(ex))
    assignment_name = uuid.uuid4()
    custom_headers = None
    RoleAssignmentCreateParameters = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    # Newer API versions take a CreateParameters model directly ...
    if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
        parameters = RoleAssignmentCreateParameters(
            role_definition_id=role_id, principal_id=object_id)
        return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
    # ... while older ones wrap the same fields in a Properties model.
    RoleAssignmentProperties = get_sdk(cmd.cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                       'RoleAssignmentProperties', mod='models',
                                       operation_group='role_assignments')
    properties = RoleAssignmentProperties(role_definition_id=role_id, principal_id=object_id)
    return assignments_client.create(scope, assignment_name, properties, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, service principal name, or object id) to a
    graph object id, raising CLIError when nothing in the directory matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    matches = None
    if '@' in assignee:  # looks like a user principal name
        matches = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not matches:
        matches = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not matches:  # assume an object id, let us verify it
        matches = _get_object_stubs(client, [assignee])

    # 2+ matches should never happen, so we only check 'no match' here
    if not matches:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))
    return matches[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Fetch directory-object stubs for the given object ids via the graph objects API."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cmd, scope):
    """Return True if a role assignment with the Network Contributor role
    definition GUID already exists exactly at *scope*.

    Returns False when none is found or when the 2018-01-01-preview
    authorization API is not supported by the current profile.
    """
    # Well-known GUID used by the built-in "Network Contributor" role definition
    # (matches the variable name; assignment ids are compared by suffix below).
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"

    factory = get_auth_management_client(cmd.cli_ctx, scope)
    assignments_client = factory.role_assignments

    if cmd.supported_api_version(min_api='2018-01-01-preview', resource_type=ResourceType.MGMT_AUTHORIZATION):
        # atScope() also returns assignments inherited from parent scopes,
        # hence the explicit i.scope == scope check.
        for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
            if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
                return True
    return False
def aks_check_acr(cmd, client, resource_group_name, name, acr):
    """Validate that the cluster can pull from *acr* by running the
    'canipull' diagnostic pod on the cluster via kubectl.

    Downloads cluster-user credentials to a temp kubeconfig, checks the
    kubectl/server versions, then runs the canipull image as a host-network
    pod with /etc/kubernetes and /etc/ssl/certs mounted. The pod output is
    printed; it is also returned when the test hook requests it.
    """
    if not which("kubectl"):
        raise ValidationError("Can not find kubectl executable in PATH")

    return_msg = None
    fd, browse_path = tempfile.mkstemp()
    try:
        aks_get_credentials(
            cmd, client, resource_group_name, name, admin=False, path=browse_path
        )

        # Get kubectl minor version
        kubectl_minor_version = -1
        try:
            # NOTE(review): this rebinding shadows the 'cmd' command-context
            # parameter; 'cmd' is not used as a context object after this point.
            cmd = f"kubectl version -o json --kubeconfig {browse_path}"
            output = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
            jsonS, _ = output.communicate()
            kubectl_version = json.loads(jsonS)
            # Remove any non-numeric characters like + from minor version
            kubectl_minor_version = int(re.sub(r"\D", "", kubectl_version["clientVersion"]["minor"]))
            kubectl_server_minor_version = int(
                kubectl_version["serverVersion"]["minor"])
            kubectl_server_patch = int(
                kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
            if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
                logger.warning(
                    "There is a known issue for Kubernetes versions < 1.17.14 when connecting to "
                    "ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for"
                    "more information."
                )
        # NOTE(review): Popen/communicate do not raise CalledProcessError, so
        # this except is effectively dead; json/KeyError failures from the
        # version parsing above would propagate uncaught — confirm intent.
        except subprocess.CalledProcessError as err:
            raise ValidationError(
                "Could not find kubectl minor version: {}".format(err))
        if kubectl_minor_version == -1:
            raise ValidationError("Failed to get kubectl version")

        podName = "canipull-" + str(uuid.uuid4())
        # Pod spec overrides: run as root on the host network so the pod can
        # read the node's /etc/kubernetes (azure.json) and CA certificates.
        overrides = {
            "spec": {
                "restartPolicy": "Never",
                "hostNetwork": True,
                "containers": [
                    {
                        "securityContext": {"runAsUser": 0},
                        "name": podName,
                        "image": CONST_CANIPULL_IMAGE,
                        "args": ["-v6", acr],
                        "stdin": True,
                        "stdinOnce": True,
                        "tty": True,
                        "volumeMounts": [
                            {"name": "azurejson", "mountPath": "/etc/kubernetes"},
                            {"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
                        ],
                    }
                ],
                "tolerations": [
                    {"key": "CriticalAddonsOnly", "operator": "Exists"},
                    {"effect": "NoExecute", "operator": "Exists"},
                ],
                "volumes": [
                    {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
                    {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
                ],
                "nodeSelector": {"kubernetes.io/os": "linux"},
            }
        }

        try:
            cmd = [
                "kubectl",
                "run",
                "--kubeconfig",
                browse_path,
                "--rm",
                "--quiet",
                "--image",
                CONST_CANIPULL_IMAGE,
                "--overrides",
                json.dumps(overrides),
                "-it",
                podName,
                "--namespace=default",
            ]
            # Support kubectl versons < 1.18
            if kubectl_minor_version < 18:
                cmd += ["--generator=run-pod/v1"]
            output = subprocess.check_output(
                cmd,
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as err:
            raise AzureInternalError("Failed to check the ACR: {} Command output: {}".format(err, err.output))
        if output:
            print(output)
            # only return the output in test case "test_aks_create_attach_acr"
            test_hook_data = get_cmd_test_hook_data("test_aks_create_attach_acr.hook")
            if test_hook_data:
                test_configs = test_hook_data.get("configs", None)
                if test_configs and test_configs.get("returnOutput", False):
                    return_msg = output
        else:
            raise AzureInternalError("Failed to check the ACR.")
    finally:
        # Close the mkstemp file descriptor; the temp kubeconfig file itself
        # is left on disk.
        os.close(fd)

    return return_msg
# pylint: disable=too-many-statements,too-many-branches
def _aks_browse(
    cmd,
    client,
    resource_group_name,
    name,
    disable_browser=False,
    listen_address="127.0.0.1",
    listen_port="8001",
    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
):
    """Open the cluster's Kubernetes resources view, or proxy to the legacy
    kube-dashboard addon.

    For k8s >= 1.19.0 or when the dashboard addon is disabled, the Azure
    Portal workloads view is opened. Otherwise the dashboard pod/port are
    located with kubectl and a local `kubectl proxy` is started (with cloud
    shell port-forwarding support). Returns a status message used by tests,
    or None.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=resource_type,
                                                operation_group='managed_clusters')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))

    return_msg = None
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
            return_msg = "Kubernetes resources view on {}".format(dashboardURL)

        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return return_msg

    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise FileOperationError('Can not find kubectl executable in PATH')

    fd, browse_path = tempfile.mkstemp()
    try:
        aks_get_credentials(cmd, client, resource_group_name,
                            name, admin=False, path=browse_path)

        # find the dashboard pod's name
        try:
            dashboard_pod = subprocess.check_output(
                [
                    "kubectl",
                    "get",
                    "pods",
                    "--kubeconfig",
                    browse_path,
                    "--namespace",
                    "kube-system",
                    "--output",
                    "name",
                    "--selector",
                    "k8s-app=kubernetes-dashboard",
                ],
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
        except subprocess.CalledProcessError as err:
            raise ResourceNotFoundError('Could not find dashboard pod: {} Command output: {}'.format(err, err.output))
        if dashboard_pod:
            # remove any "pods/" or "pod/" prefix from the name
            dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
        else:
            raise ResourceNotFoundError("Couldn't find the Kubernetes dashboard pod.")

        # find the port
        try:
            dashboard_port = subprocess.check_output(
                [
                    "kubectl",
                    "get",
                    "pods",
                    "--kubeconfig",
                    browse_path,
                    "--namespace",
                    "kube-system",
                    "--selector",
                    "k8s-app=kubernetes-dashboard",
                    "--output",
                    "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'",
                ],
                universal_newlines=True,
                stderr=subprocess.STDOUT,
            )
            # output format: "'{port}'"
            dashboard_port = int((dashboard_port.replace("'", "")))
        except subprocess.CalledProcessError as err:
            raise ResourceNotFoundError('Could not find dashboard port: {} Command output: {}'.format(err, err.output))

        # use https if dashboard container is using https
        if dashboard_port == 8443:
            protocol = 'https'
        else:
            protocol = 'http'

        proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
        dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                            protocol)
        # launch kubectl port-forward locally to access the remote dashboard
        if in_cloud_console():
            # TODO: better error handling here.
            response = requests.post(
                'http://localhost:8888/openport/{0}'.format(listen_port))
            result = json.loads(response.text)
            dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
                result['url'], protocol)
            term_id = os.environ.get('ACC_TERM_ID')
            if term_id:
                response = requests.post(
                    "http://localhost:8888/openLink/{0}".format(term_id),
                    json={"url": dashboardURL},
                )
            logger.warning(
                'To view the console, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Proxy running on %s', proxy_url)

        timeout = None
        test_hook_data = get_cmd_test_hook_data("test_aks_browse_legacy.hook")
        if test_hook_data:
            test_configs = test_hook_data.get("configs", None)
            if test_configs and test_configs.get("enableTimeout", False):
                timeout = test_configs.get("timeoutInterval", None)
        logger.warning('Press CTRL+C to close the tunnel...')
        if not disable_browser:
            wait_then_open_async(dashboardURL)
        try:
            try:
                subprocess.check_output(
                    [
                        "kubectl",
                        "--kubeconfig",
                        browse_path,
                        "proxy",
                        "--address",
                        listen_address,
                        "--port",
                        listen_port,
                    ],
                    universal_newlines=True,
                    stderr=subprocess.STDOUT,
                )
            except subprocess.CalledProcessError as err:
                # BUGFIX: str.find returns -1 (truthy) when the substring is
                # absent, so the bare `if err.output.find(...)` took this
                # branch for unrelated failures and made the generic error
                # path below unreachable. Compare against -1 explicitly.
                if err.output.find('unknown flag: --address') != -1:
                    return_msg = "Test Invalid Address! "
                    if listen_address != '127.0.0.1':
                        logger.warning(
                            '"--address" is only supported in kubectl v1.13 and later.')
                        logger.warning(
                            'The "--listen-address" argument will be ignored.')
                    try:
                        subprocess.call(["kubectl", "--kubeconfig",
                                         browse_path, "proxy", "--port", listen_port], timeout=timeout)
                    except subprocess.TimeoutExpired:
                        logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
                        return_msg = return_msg if return_msg else ""
                        return_msg += "Test Passed!"
                    except subprocess.CalledProcessError as new_err:
                        raise AzureInternalError(
                            "Could not open proxy: {} Command output: {}".format(
                                new_err, new_err.output
                            )
                        )
                else:
                    raise AzureInternalError(
                        "Could not open proxy: {} Command output: {}".format(
                            err, err.output
                        )
                    )
        except subprocess.TimeoutExpired:
            logger.warning("Currently in a test environment, the proxy is closed due to a preset timeout!")
            return_msg = return_msg if return_msg else ""
            return_msg += "Test Passed!"
        except KeyboardInterrupt:
            # Let command processing finish gracefully after the user presses [Ctrl+C]
            pass
        finally:
            if in_cloud_console():
                # BUGFIX: close the same port that was opened above instead of
                # a hardcoded 8001, so a custom --listen-port is released.
                requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
    finally:
        os.close(fd)
    return return_msg
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address="127.0.0.1", listen_port="8001"):
    """Show the Kubernetes dashboard/resources view for a managed cluster.

    Thin wrapper over _aks_browse pinned to the MGMT_CONTAINERSERVICE
    resource type.
    """
    return _aks_browse(cmd, client, resource_group_name, name,
                       disable_browser=disable_browser,
                       listen_address=listen_address,
                       listen_port=listen_port,
                       resource_type=ResourceType.MGMT_CONTAINERSERVICE)
# pylint: disable=too-many-locals
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,
               location=None,
               kubernetes_version='',
               admin_username="azureuser",
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               no_ssh_key=False,
               edge_zone=None,
               node_osdisk_diskencryptionset_id=None,
               disable_local_accounts=False,
               disable_rbac=None,
               tags=None,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               nat_gateway_managed_outbound_ip_count=None,
               nat_gateway_idle_timeout=None,
               outbound_type=None,
               network_plugin=None,
               network_policy=None,
               auto_upgrade_channel=None,
               cluster_autoscaler_profile=None,
               uptime_sla=False,
               dns_name_prefix=None,
               fqdn_subdomain=None,
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               disable_public_fqdn=False,
               service_principal=None,
               client_secret=None,
               enable_managed_identity=True,
               assign_identity=None,
               assign_kubelet_identity=None,
               enable_aad=False,
               enable_azure_rbac=False,
               aad_admin_group_object_ids=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None,
               attach_acr=None,
               skip_subnet_role_assignment=False,
               enable_addons=None,
               workspace_resource_id=None,
               enable_msi_auth_for_monitoring=False,
               aci_subnet_name=None,
               appgw_name=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_sgxquotehelper=False,
               enable_secret_rotation=False,
               rotation_poll_interval=None,
               nodepool_name="nodepool1",
               node_vm_size=None,
               os_sku=None,
               vnet_subnet_id=None,
               pod_subnet_id=None,
               enable_node_public_ip=False,
               node_public_ip_prefix_id=None,
               enable_cluster_autoscaler=False,
               min_count=None,
               max_count=None,
               node_count=3,
               nodepool_tags=None,
               nodepool_labels=None,
               node_osdisk_type=None,
               node_osdisk_size=0,
               vm_set_type=None,
               zones=None,
               ppg=None,
               max_pods=0,
               enable_encryption_at_host=False,
               enable_ultra_ssd=False,
               enable_fips_image=False,
               snapshot_id=None,
               kubelet_config=None,
               linux_os_config=None,
               no_wait=False,
               yes=False,
               aks_custom_headers=None,
               ):
    """Create a managed Kubernetes (AKS) cluster.

    All keyword arguments are captured via locals() and handed to
    AKSManagedClusterCreateDecorator, which validates them, builds the
    ManagedCluster model, and issues the create request. Returns None on a
    decorator-signalled early exit.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()

    # decorator pattern
    from azure.cli.command_modules.acs.managed_cluster_decorator import AKSManagedClusterCreateDecorator
    aks_create_decorator = AKSManagedClusterCreateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
    try:
        # construct mc profile
        mc = aks_create_decorator.construct_mc_profile_default()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to create a real managed cluster
    return aks_create_decorator.create_mc(mc)
def aks_update(cmd, client, resource_group_name, name,
               disable_local_accounts=False,
               enable_local_accounts=False,
               tags=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               nat_gateway_managed_outbound_ip_count=None,
               nat_gateway_idle_timeout=None,
               auto_upgrade_channel=None,
               cluster_autoscaler_profile=None,
               uptime_sla=False,
               no_uptime_sla=False,
               api_server_authorized_ip_ranges=None,
               enable_public_fqdn=False,
               disable_public_fqdn=False,
               enable_managed_identity=False,
               assign_identity=None,
               enable_aad=False,
               enable_azure_rbac=False,
               disable_azure_rbac=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               windows_admin_password=None,
               enable_ahub=False,
               disable_ahub=False,
               enable_windows_gmsa=False,
               gmsa_dns_server=None,
               gmsa_root_domain_name=None,
               attach_acr=None,
               detach_acr=None,
               enable_secret_rotation=False,
               disable_secret_rotation=False,
               rotation_poll_interval=None,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               min_count=None, max_count=None,
               nodepool_labels=None,
               no_wait=False,
               yes=False,
               aks_custom_headers=None):
    """Update an existing managed Kubernetes (AKS) cluster.

    All keyword arguments are captured via locals() and handed to
    AKSManagedClusterUpdateDecorator, which validates them, patches the
    ManagedCluster model, and issues the update request. Returns None on a
    decorator-signalled early exit.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()

    # decorator pattern
    from azure.cli.command_modules.acs.managed_cluster_decorator import AKSManagedClusterUpdateDecorator
    aks_update_decorator = AKSManagedClusterUpdateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
    )
    try:
        # update mc profile
        mc = aks_update_decorator.update_mc_profile_default()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to update the real managed cluster
    return aks_update_decorator.update_mc(mc)
# pylint: disable=line-too-long
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given addon(s) on a managed cluster.

    For the monitoring addon in MSI-auth mode, the Data Collection Rule
    association is removed first (best effort) so the DCR can later be
    deleted; then the addon profiles are updated and the cluster is patched.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    try:
        if addons == "monitoring" and CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled and \
                CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
            # remove the DCR association because otherwise the DCR can't be deleted
            ensure_container_insights_for_monitoring(
                cmd,
                instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                subscription_id,
                resource_group_name,
                name,
                instance.location,
                remove_monitoring=True,
                aad_route=True,
                create_dcr=False,
                create_dcra=True
            )
    except TypeError:
        # Best-effort cleanup: e.g. a None addon config makes the membership
        # test above raise TypeError; disabling proceeds regardless.
        pass

    instance = _update_addons(
        cmd,
        instance,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
# pylint: disable=line-too-long
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      enable_secret_rotation=False,
                      rotation_poll_interval=None,
                      no_wait=False,
                      enable_msi_auth_for_monitoring=False):
    """Enable the given addon(s) on a managed cluster.

    The update is polled to completion when monitoring, ingress appgw, or
    virtual node are enabled, because those addons need post-create role
    assignments (and monitoring may need a Data Collection Rule) based on
    the result; otherwise the request is dispatched per no_wait.
    """
    instance = client.get(resource_group_name, name)
    # MSI-based cluster: the RP sets the SP client id to the literal "msi".
    msi_auth = False
    if instance.service_principal_profile.client_id == "msi":
        msi_auth = True

    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              enable_msi_auth_for_monitoring=enable_msi_auth_for_monitoring,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              enable_secret_rotation=enable_secret_rotation,
                              rotation_poll_interval=rotation_poll_interval,
                              no_wait=no_wait)

    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)

    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node

    if need_pull_for_result:
        if enable_monitoring:
            if CONST_MONITORING_USING_AAD_MSI_AUTH in instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config and \
                    str(instance.addon_profiles[CONST_MONITORING_ADDON_NAME].config[CONST_MONITORING_USING_AAD_MSI_AUTH]).lower() == 'true':
                if msi_auth:
                    # create a Data Collection Rule (DCR) and associate it with the cluster
                    ensure_container_insights_for_monitoring(
                        cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME],
                        subscription_id,
                        resource_group_name,
                        name,
                        instance.location,
                        aad_route=True,
                        create_dcr=True,
                        create_dcra=True)
                else:
                    raise ArgumentUsageError(
                        "--enable-msi-auth-for-monitoring can not be used on clusters with service principal auth.")
            else:
                # monitoring addon will use legacy path
                ensure_container_insights_for_monitoring(
                    cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME], subscription_id, resource_group_name, name, instance.location, aad_route=False)

        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))

        # For monitoring addon, Metrics role assignement doesnt require in case of MSI auth
        if enable_monitoring and not enable_msi_auth_for_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                add_monitoring_role_assignment(
                    result, cluster_resource_id, cmd)

        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(result, cmd)

        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.
    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """List available orchestrator versions for managed clusters in *location*."""
    return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None, public_fqdn=False,
                        credential_format=None):
    """Fetch cluster credentials and print or merge the kubeconfig into *path*.

    admin selects clusterAdmin vs clusterUser credentials; public_fqdn
    requests the public endpoint; credential_format is only valid for
    clusterUser credentials. The extra serverType/format arguments are only
    passed on the 'latest' cloud profile.
    """
    credentialResults = None
    serverType = None
    if public_fqdn:
        serverType = 'public'
    if credential_format:
        credential_format = credential_format.lower()
        if admin:
            raise InvalidArgumentValueError("--format can only be specified when requesting clusterUser credential.")
    if admin:
        if cmd.cli_ctx.cloud.profile == "latest":
            credentialResults = client.list_cluster_admin_credentials(
                resource_group_name, name, serverType)
        else:
            credentialResults = client.list_cluster_admin_credentials(
                resource_group_name, name)
    else:
        if cmd.cli_ctx.cloud.profile == "latest":
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name, serverType, credential_format)
        else:
            credentialResults = client.list_cluster_user_credentials(
                resource_group_name, name)

    # Check if KUBECONFIG environmental variable is set
    # If path is different than default then that means -f/--file is passed
    # in which case we ignore the KUBECONFIG variable
    # KUBECONFIG can be a list of files separated by the platform's path
    # separator; if so, use the first entry.
    # BUGFIX: split on os.pathsep instead of a literal ':' — on Windows the
    # separator is ';' and drive letters contain ':', so splitting on ':'
    # truncated paths like 'C:\\Users\\me\\.kube\\config' to 'C'.
    if "KUBECONFIG" in os.environ and path == os.path.join(os.path.expanduser('~'), '.kube', 'config'):
        path = os.environ["KUBECONFIG"].split(os.pathsep)[0]

    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, optionally filtered to one resource group,
    with null-valued fields stripped from each cluster.
    """
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_nulls(list(clusters))
def aks_show(cmd, client, resource_group_name, name):
    """Show one managed cluster with null-valued fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset a cluster's service principal or (legacy) AAD profile.

    Exactly one of --reset-service-principal / --reset-aad-profile must be
    chosen; each path validates its own required arguments and dispatches
    the corresponding begin_reset_* operation via sdk_no_wait.
    """
    ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='managed_clusters')
    # Equal booleans means both or neither flag was given — both are invalid.
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError(
            'usage error: --reset-service-principal | --reset-aad-profile')
    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError(
                'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=service_principal, secret=client_secret
        )
        return sdk_no_wait(no_wait,
                           client.begin_reset_service_principal_profile,
                           resource_group_name,
                           name, service_principal_profile)

    # reset_aad path: tenant id is optional, the three app arguments are not.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.begin_reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of one node pool in a managed cluster.

    The pool name may be omitted only for single-pool clusters. Scaling a
    pool with the cluster autoscaler enabled is rejected; an unknown pool
    name raises CLIError.
    """
    instance = client.get(resource_group_name, name)

    if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
            if agent_profile.enable_auto_scaling:
                raise CLIError(
                    "Cannot scale cluster autoscaler enabled node pool.")

            agent_profile.count = int(node_count)  # pylint: disable=no-member
            # null out the SP profile because otherwise validation complains
            instance.service_principal_profile = None
            return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                yes=False,
                no_wait=False) if False else None  # placeholder-guard: see real def below
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name,
                                           snapshot_id=None):
    """Kick off a node-image-only upgrade for one agent pool.

    When a snapshot id is given it is forwarded to the resource provider via
    the "AKSSnapshotId" request header.
    """
    request_headers = {"AKSSnapshotId": snapshot_id} if snapshot_id else {}
    return sdk_no_wait(
        no_wait,
        client.begin_upgrade_node_image_version,
        resource_group_name,
        cluster_name,
        nodepool_name,
        headers=request_headers)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
    """Run a shell command inside the cluster via the AKS run-command API.

    Attaches ``command_files`` (zipped, base64-encoded) as the command
    context, waits up to 300 seconds for the result and pretty-prints it.

    :raises ValidationError: if ``command_string`` is empty.
    """
    colorama.init()
    mc = client.get(resource_group_name, name)
    if not command_string:
        raise ValidationError('Command cannot be empty.')
    RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    request_payload = RunCommandRequest(command=command_string)
    request_payload.context = _get_command_context(command_files)
    # If this cluster has Azure AD enabled, pass a user token so the command
    # executes under the current user's identity. The token is acquired for
    # the AKS managed server AppID (same id for all clouds).
    if mc.aad_profile is not None and mc.aad_profile.managed:
        request_payload.cluster_token = _get_dataplane_aad_token(
            cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")
    commandResultFuture = client.begin_run_command(
        resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
    return _print_command_result(cmd.cli_ctx, commandResultFuture.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
    """Fetch the result of a previously issued run-command and print it."""
    if not command_id:
        raise ValidationError('CommandID cannot be empty.')
    result = client.get_command_result(resource_group_name, name, command_id)
    return _print_command_result(cmd.cli_ctx, result)
def _print_command_result(cli_ctx, commandResult):
    """Render a run-command result for the console.

    Honors an explicit ``-o``/``--output`` choice by returning the raw object
    to the normal render pipeline; otherwise prints a colorized summary and
    returns None.
    """
    # cli_ctx.data['safe_params'] contains the list of parameter names the user
    # typed, without values. CLI core also uses this to calculate the
    # ParameterSetName header for all HTTP requests from the CLI.
    if (cli_ctx.data['safe_params'] is None or
            "-o" in cli_ctx.data['safe_params'] or
            "--output" in cli_ctx.data['safe_params']):
        # user specified output format, honor their choice, return object to render pipeline
        return commandResult

    # user didn't specify any format; customize the print for best experience
    if commandResult.provisioning_state == "Succeeded":
        # succeeded: print exit code and logs
        print(
            f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
            f"finished at {commandResult.finished_at} "
            f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
        print(commandResult.logs)
        return

    if commandResult.provisioning_state == "Failed":
        # failed: print the reason in red
        print(
            f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
        return

    # still in an in-progress (*-ing) state
    print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
    return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
# . means to attach current folder, cannot combine more files. (at least for now)
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir() // use this to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
    """Acquire an AAD access token for the given data-plane server app id.

    This function is mostly copied from the keyvault CLI.
    """
    profile = Profile(cli_ctx=cli_ctx)
    creds, _, _ = profile.get_raw_token(resource=serverAppId)
    return creds[2].get('accessToken')
# Name and implementation module of the Azure Dev Spaces CLI extension,
# used by the aks use-dev-spaces / remove-dev-spaces commands below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    # Install (or optionally update) the dev-spaces extension, then delegate
    # the real work to its ads_use_dev_spaces entry point.
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        azext_custom = _get_azext_module(
            DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_use_dev_spaces(
                name, resource_group_name, update, space_name, endpoint_type, prompt)
        except TypeError:
            # An older extension exposes a different signature; suggest the
            # fix instead of surfacing an opaque TypeError.
            raise CLIError(
                "Use '--update' option to get the latest Azure Dev Spaces client components.")
        except AttributeError as ae:
            raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    # Ensure the dev-spaces extension is present, then delegate to it.
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        azext_custom = _get_azext_module(
            DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_remove_dev_spaces(
                name, resource_group_name, prompt)
        except AttributeError as ae:
            raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Kick off certificate rotation on the given managed cluster."""
    return sdk_no_wait(
        no_wait,
        client.begin_rotate_cluster_certificates,
        resource_group_name,
        name,
    )
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   enable_msi_auth_for_monitoring=False,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   enable_secret_rotation=False,
                   disable_secret_rotation=False,
                   rotation_poll_interval=None,
                   no_wait=False):
    """Enable or disable one or more addons on a managed-cluster instance.

    ``addons`` is a comma-separated list of addon names (keys of ADDONS).
    Mutates ``instance.addon_profiles`` in place (addon-specific config is
    built when enabling; config is cleared when disabling) and returns the
    mutated instance; the caller is responsible for sending the update.

    :raises CLIError: on an unknown addon name, a disable of an addon that is
        not installed, or a re-enable of certain already-enabled addons.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    # parse the comma-separated addons argument
    addon_args = addons.split(',')
    addon_profiles = instance.addon_profiles or {}
    os_type = 'Linux'
    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type
        # honor addon names defined in Azure CLI: fold differently-cased
        # profile keys onto the canonical addon name
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)
        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize the workspace resource id: leading slash, no trailing slash
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
                addon_profile.config[CONST_MONITORING_USING_AAD_MSI_AUTH] = enable_msi_auth_for_monitoring
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # start from a fresh profile; only the supplied appgw options are recorded
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            elif addon == CONST_OPEN_SERVICE_MESH_ADDON_NAME:
                if addon_profile.enabled:
                    raise AzureInternalError(
                        'The open-service-mesh addon is already enabled for this managed '
                        'cluster.\n To change open-service-mesh configuration, run '
                        '"az aks disable-addons -a open-service-mesh -n {} -g {}" '
                        'before enabling it again.'
                        .format(name, resource_group_name))
                addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
            elif addon == CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME:
                if addon_profile.enabled:
                    raise ArgumentUsageError(
                        'The azure-keyvault-secrets-provider addon is already enabled for this managed cluster.\n'
                        'To change azure-keyvault-secrets-provider configuration, run '
                        f'"az aks disable-addons -a azure-keyvault-secrets-provider -n {name} -g {resource_group_name}" '  # pylint: disable=line-too-long
                        'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_SECRET_ROTATION_ENABLED: "false", CONST_ROTATION_POLL_INTERVAL: "2m"})
                if enable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "true"
                if disable_secret_rotation:
                    addon_profile.config[CONST_SECRET_ROTATION_ENABLED] = "false"
                if rotation_poll_interval is not None:
                    addon_profile.config[CONST_ROTATION_POLL_INTERVAL] = rotation_poll_interval
                addon_profiles[CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME] = addon_profile
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            # disabling an addon also clears its config
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable
    instance.addon_profiles = addon_profiles
    # null out the SP profile because otherwise validation complains
    instance.service_principal_profile = None
    return instance
def _get_azext_module(extension_name, module_name):
    """Load and return the Python module backing an installed CLI extension."""
    try:
        # Make the installed extension importable, then import its module.
        from azure.cli.core.extension.operations import add_extension_to_path
        from importlib import import_module
        add_extension_to_path(extension_name)
        return import_module(module_name)
    except ImportError as ie:
        raise CLIError(ie)
def _install_dev_spaces_extension(cmd, extension_name):
    """Best-effort install of a CLI extension; True on success, False otherwise."""
    try:
        from azure.cli.core.extension import operations
        operations.add_extension(cmd=cmd, extension_name=extension_name)
        return True
    except Exception:  # nopa pylint: disable=broad-except
        return False
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload a CLI extension.

    Returns True when the extension is usable afterwards (a CLIError during
    update is logged and tolerated); False when it is not installed or its
    module cannot be loaded.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as exc:
        # A failed update is not fatal; keep using the installed version.
        logger.info(exc)
    except ExtensionNotInstalledException as exc:
        logger.debug(exc)
        return False
    except ModuleNotFoundError as exc:
        logger.debug(exc)
        logger.error(
            "Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure a CLI extension is installed (optionally updated); True when usable."""
    from azure.cli.core.extension import (
        ExtensionNotInstalledException, get_extension)
    try:
        get_extension(extension_name)
    except ExtensionNotInstalledException:
        # Not installed yet: install it now.
        return _install_dev_spaces_extension(cmd, extension_name)
    if update:
        return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    return True
def _ensure_aks_acr(cmd,
                    assignee,
                    acr_name_or_id,
                    subscription_id,
                    detach=False,
                    is_service_principal=True):
    """Resolve an ACR by resource id or name and grant (or revoke) pull access.

    ``acr_name_or_id`` may be a full resource id (possibly in another
    subscription) or a bare registry name searched across all resource
    groups. Delegates the role-assignment work to
    _ensure_aks_acr_role_assignment.

    :raises CLIError: if the registry cannot be found or queried.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    # Check if the ACR exists by resource ID.
    if is_valid_resource_id(acr_name_or_id):
        try:
            parsed_registry = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cmd.cli_ctx, subscription_id=parsed_registry['subscription'])
            registry = acr_client.registries.get(
                parsed_registry['resource_group'], parsed_registry['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
        _ensure_aks_acr_role_assignment(
            cmd, assignee, registry.id, detach, is_service_principal)
        return
    # Check if the ACR exists by name across all resource groups.
    registry_name = acr_name_or_id
    registry_resource = 'Microsoft.ContainerRegistry/registries'
    try:
        registry = get_resource_by_name(
            cmd.cli_ctx, registry_name, registry_resource)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise CLIError(
                "ACR {} not found. Have you provided the right ACR name?".format(registry_name))
        raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cmd, assignee, registry.id, detach, is_service_principal)
    return
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the agent pool resource identified by the given names."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List every agent pool of the given managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      node_vm_size=None,
                      os_type=None,
                      os_sku=None,
                      vnet_subnet_id=None,
                      pod_subnet_id=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      enable_cluster_autoscaler=False,
                      min_count=None,
                      max_count=None,
                      node_count=3,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      labels=None,
                      tags=None,
                      node_taints=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      mode=CONST_NODEPOOL_MODE_USER,
                      scale_down_mode=CONST_SCALE_DOWN_MODE_DELETE,
                      max_surge=None,
                      max_pods=0,
                      zones=None,
                      ppg=None,
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      enable_fips_image=False,
                      snapshot_id=None,
                      kubelet_config=None,
                      linux_os_config=None,
                      no_wait=False,
                      aks_custom_headers=None):
    """Add a new agent pool to a managed cluster (decorator-based implementation).

    All parameters are captured via locals() and handed to
    AKSAgentPoolAddDecorator, which builds and sends the request.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()
    # decorator pattern
    from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolAddDecorator
    from azure.cli.command_modules.acs._consts import AgentPoolDecoratorMode
    aks_agentpool_add_decorator = AKSAgentPoolAddDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        agentpool_decorator_mode=AgentPoolDecoratorMode.STANDALONE,
    )
    try:
        # construct agentpool profile
        agentpool = aks_agentpool_add_decorator.construct_agentpool_profile_default()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to add a real agentpool
    return aks_agentpool_add_decorator.add_agentpool(agentpool)
# pylint: disable=too-many-boolean-expressions
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None,
                         max_count=None,
                         labels=None,
                         tags=None,
                         node_taints=None,
                         mode=None,
                         scale_down_mode=None,
                         max_surge=None,
                         no_wait=False,
                         aks_custom_headers=None):
    """Update properties of an existing agent pool (decorator-based implementation).

    All parameters are captured via locals() and handed to
    AKSAgentPoolUpdateDecorator, which builds and sends the request.
    """
    # DO NOT MOVE: get all the original parameters and save them as a dictionary
    raw_parameters = locals()
    # decorator pattern
    from azure.cli.command_modules.acs.agentpool_decorator import AKSAgentPoolUpdateDecorator
    from azure.cli.command_modules.acs._consts import AgentPoolDecoratorMode
    aks_agentpool_update_decorator = AKSAgentPoolUpdateDecorator(
        cmd=cmd,
        client=client,
        raw_parameters=raw_parameters,
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        agentpool_decorator_mode=AgentPoolDecoratorMode.STANDALONE,
    )
    try:
        # update agentpool profile
        agentpool = aks_agentpool_update_decorator.update_agentpool_profile_default()
    except DecoratorEarlyExitException:
        # exit gracefully
        return None
    # send request to update the real agentpool
    return aks_agentpool_update_decorator.update_agentpool(agentpool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Set an agent pool to an explicit node count.

    Rejects pools that have the cluster autoscaler enabled and no-op
    requests where the count would not change.
    """
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    requested_count = int(node_count)
    if instance.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if requested_count == instance.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    instance.count = requested_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait,
                       client.begin_create_or_update,
                       resource_group_name,
                       cluster_name,
                       nodepool_name,
                       instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          max_surge=None,
                          no_wait=False,
                          aks_custom_headers=None,
                          snapshot_id=None):
    """Upgrade one agent pool, either to a Kubernetes version or node image only.

    ``--node-image-only`` is mutually exclusive with both an explicit
    Kubernetes version and ``--max-surge``. When a snapshot id is supplied
    (and no version/node-image-only), the target version is taken from the
    snapshot and the snapshot is recorded as the pool's creation data.
    """
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
    if kubernetes_version != '' and node_image_only:
        raise CLIError(
            'Conflicting flags. Upgrading the Kubernetes version will also '
            'upgrade node image version. If you only want to upgrade the '
            'node version please use the "--node-image-only" option only.'
        )
    # Note: we exclude this option because node image upgrade can't accept nodepool put fields like max surge
    if max_surge and node_image_only:
        raise MutuallyExclusiveArgumentError(
            'Conflicting flags. Unable to specify max-surge with node-image-only.'
            'If you want to use max-surge with a node image upgrade, please first '
            'update max-surge using "az aks nodepool update --max-surge".'
        )
    if node_image_only:
        return _upgrade_single_nodepool_image_version(no_wait,
                                                      client,
                                                      resource_group_name,
                                                      cluster_name,
                                                      nodepool_name,
                                                      snapshot_id)
    # load model CreationData
    from azure.cli.command_modules.acs.decorator import AKSModels
    CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
    creationData = None
    if snapshot_id:
        snapshot = get_snapshot_by_snapshot_id(cmd.cli_ctx, snapshot_id)
        if not kubernetes_version and not node_image_only:
            # default the target version to the snapshot's version
            kubernetes_version = snapshot.kubernetes_version
        creationData = CreationData(
            source_resource_id=snapshot_id
        )
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    instance.orchestrator_version = kubernetes_version
    instance.creation_data = creationData
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    # custom headers
    aks_custom_headers = extract_comma_separated_string(
        aks_custom_headers,
        enable_strip=True,
        extract_kv=True,
        default_value={},
        allow_appending_values_to_same_key=True,
    )
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
        headers=aks_custom_headers,
    )
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool after verifying (case-insensitively) that it exists."""
    wanted = nodepool_name.lower()
    existing_names = (profile.name.lower()
                      for profile in client.list(resource_group_name, cluster_name))
    if wanted not in existing_names:
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch the upgrade profile (available upgrade versions) for one agent pool."""
    profile = client.get_upgrade_profile(
        resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cmd,
                                    assignee,
                                    registry_id,
                                    detach=False,
                                    is_service_principal=True):
    """Grant (or, when ``detach`` is set, revoke) 'acrpull' on a registry.

    :raises CLIError: if the role assignment cannot be created or deleted.
    """
    if detach:
        deleted = _delete_role_assignments(cmd.cli_ctx,
                                           'acrpull',
                                           assignee,
                                           scope=registry_id,
                                           is_service_principal=is_service_principal)
        if not deleted:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cmd,
                                   'acrpull',
                                   assignee,
                                   scope=registry_id,
                                   is_service_principal=is_service_principal)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return the service principal/secret pair to use for a new cluster.

    When --service-principal was not given, creates a fresh service principal
    (and a secret, if none was supplied); otherwise validates that the
    matching --client-secret was provided too.

    :return: dict with 'client_secret', 'service_principal' and
        'aad_session_key' (the latter is only populated when an SP was
        created here).
    :raises CLIError: if SP creation fails or --client-secret is missing.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # a random salt keeps the homepage URL unique per creation attempt
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        if dns_name_prefix:
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
                salt, dns_name_prefix, location)
        else:
            url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
                salt, fqdn_subdomain, location)
        service_principal, aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cmd,
                    cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity-provider profile for an OpenShift managed cluster.

    When ``create`` is set, an AAD application is created — or an existing one
    matching the identifier URI is updated — with the sign-in/read-profile and
    directory-read permissions requested below. The tenant id is resolved from
    the current login when not given.
    """
    OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
                                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                                operation_group='open_shift_managed_clusters')
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD app.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing application registered under the same identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
                identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[
                                                              app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
    # Get the TenantID from the current login when not explicitly provided.
    if aad_tenant_id is None:
        profile = Profile(cli_ctx=cli_ctx)
        _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return the service principal/secret pair for a cluster (legacy path).

    Like _ensure_aks_service_principal, but also best-effort assigns the
    'Contributor' role to a newly created service principal.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if not service_principal:
        # --service-principal not specified, make one.
        if not client_secret:
            client_secret = _create_client_secret()
        # a random salt keeps the homepage URL unique per creation attempt
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        # NOTE(review): sibling code at _ensure_aks_acr_role_assignment passes
        # ``cmd`` as the first argument to _add_role_assignment; verify that
        # passing ``cli_ctx`` here matches that helper's signature.
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    else:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # Another process may have created the directory between check and call.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the target with owner-only permissions (0600): it holds credentials.
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    # The incoming config is written to a temp file so the merge helper can read it.
    fd, temp_path = tempfile.mkstemp()
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        # Best-effort: a malformed kubeconfig is reported but not fatal.
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        # Always close and delete the temp file, even when the merge fails.
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, scoped to a resource group when given."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """Create an OpenShift (ARO 3.11) managed cluster.

    Builds compute/infra/master agent pool profiles, ensures an AAD identity
    provider (creating an AAD app when none of the aad_* arguments are given
    and the cluster does not already exist), optionally wires up Log Analytics
    monitoring, then submits the cluster to the resource provider.
    """
    # Resolve all SDK model classes for the open_shift_managed_clusters API surface.
    OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
                                                   resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                   operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                             operation_group='open_shift_managed_clusters')
    OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='open_shift_managed_clusters')
    NetworkProfile = cmd.get_models('NetworkProfile',
                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                    operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
                                                        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                        operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    # Default the cluster location to the resource group's location.
    if location is None:
        location = get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        # Only auto-create an AAD app when the caller supplied none of the three.
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
    osa_aad_identity = _ensure_osa_aad(cmd,
                                       cmd.cli_ctx,
                                       aad_client_app_id=aad_client_app_id,
                                       aad_client_app_secret=aad_client_app_secret,
                                       aad_tenant_id=aad_tenant_id, identifier=None,
                                       name=name, create=create_aad,
                                       customer_admin_group_id=customer_admin_group_id)
    identity_providers.append(
        OpenShiftManagedClusterIdentityProvider(
            name='Azure AD',
            provider=osa_aad_identity
        )
    )
    auth_profile = OpenShiftManagedClusterAuthProfile(
        identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    # Accept either a full resource id or a bare vnet name for --vnet-peer.
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        if not is_valid_resource_id(vnet_peer):
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(
            enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(
        vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        # NOTE(review): LongRunningOperation below waits on the poller even when
        # --no-wait was requested — TODO confirm this is intended (the second
        # _ensure_osa_aad pass needs the cluster's public_hostname).
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Second pass: register the cluster's public hostname as the AAD reply identifier.
        _ensure_osa_aad(cmd,
                        cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Show an OpenShift managed cluster with null-only fields removed."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    managed_cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([managed_cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """Scale the 'compute' agent pool of an OpenShift cluster to *compute_count* nodes."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    idx = 0
    for profile_index, profile in enumerate(instance.agent_pool_profiles):
        if profile.name.lower() == "compute":
            idx = profile_index
            break
    instance.agent_pool_profiles[idx].count = int(
        compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an OpenShift cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True,
        workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an OpenShift cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False,
        workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _put_managed_cluster_ensuring_permission(
        cmd,  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """PUT the managed cluster, then perform any required post-creation role assignments.

    If any addon (monitoring, AGIC, virtual node), managed-identity ACR attach,
    or vnet grant needs a role assignment after creation, the call waits for
    the cluster to finish; otherwise --no-wait is honored.
    """
    # some addons require post cluster creation role assignment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        poller = client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers)
        # Grant vnet permission to system assigned identity RIGHT AFTER
        # the cluster is put, this operation can reduce latency for the
        # role assignment take effect
        if need_grant_vnet_permission_to_cluster_identity:
            instant_cluster = client.get(resource_group_name, name)
            if not _add_role_assignment(cmd, 'Network Contributor',
                                        instant_cluster.identity.principal_id, scope=vnet_subnet_id,
                                        is_service_principal=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(poller)
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            # Kubelet identity may be missing if the RP failed to provision it.
            if cluster.identity_profile is None or \
               cluster.identity_profile["kubeletidentity"] is None:
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_object_id = cluster.identity_profile["kubeletidentity"].object_id
                _ensure_aks_acr(cmd,
                                assignee=kubelet_identity_object_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id,
                                is_service_principal=False)
    else:
        # No post-creation work needed: submit and honor --no-wait.
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cmd, cluster_identity_object_id, scope):
    """Grant the cluster identity Managed Identity Operator over *scope*, unless already granted."""
    factory = get_auth_management_client(cmd.cli_ctx, scope)
    assignments_client = factory.role_assignments

    for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        same_scope = assignment.scope.lower() == scope.lower()
        operator_role = assignment.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID)
        same_principal = assignment.principal_id.lower() == cluster_identity_object_id.lower()
        if same_scope and operator_role and same_principal:
            # already assigned
            return

    if not _add_role_assignment(cmd, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise UnauthorizedError('Could not grant Managed Identity Operator '
                                'permission to cluster identity at scope {}'.format(scope))
def aks_nodepool_snapshot_create(cmd,  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
                                 client,
                                 resource_group_name,
                                 snapshot_name,
                                 nodepool_id,
                                 location=None,
                                 tags=None,
                                 aks_custom_headers=None,
                                 no_wait=False):
    """Create a nodepool snapshot from the nodepool identified by *nodepool_id*.

    *location* defaults to the resource group's location; comma-separated
    custom headers are parsed and forwarded to the service.
    """
    rg_location = get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # load model CreationData, Snapshot
    from azure.cli.command_modules.acs.decorator import AKSModels
    CreationData = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).CreationData
    Snapshot = AKSModels(cmd, ResourceType.MGMT_CONTAINERSERVICE).Snapshot
    # Renamed from camelCase 'creationData' for PEP 8 consistency with the rest of the file.
    creation_data = CreationData(
        source_resource_id=nodepool_id
    )
    snapshot = Snapshot(
        name=snapshot_name,
        tags=tags,
        location=location,
        creation_data=creation_data
    )
    # custom headers
    aks_custom_headers = extract_comma_separated_string(
        aks_custom_headers,
        enable_strip=True,
        extract_kv=True,
        default_value={},
        allow_appending_values_to_same_key=True,
    )
    return client.create_or_update(resource_group_name, snapshot_name, snapshot, headers=aks_custom_headers)
def aks_nodepool_snapshot_show(cmd, client, resource_group_name, snapshot_name):   # pylint: disable=unused-argument
    """Fetch a nodepool snapshot by resource group and name."""
    return client.get(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_delete(cmd,    # pylint: disable=unused-argument
                                 client,
                                 resource_group_name,
                                 snapshot_name,
                                 no_wait=False,
                                 yes=False):
    """Delete a nodepool snapshot, asking for confirmation unless *yes* is set."""
    # Short-circuit: the prompt only fires when --yes was not supplied.
    confirmed = yes or prompt_y_n(
        'This will delete the snapshot "{}" in resource group "{}", Are you sure?'.format(
            snapshot_name, resource_group_name),
        default="n")
    if not confirmed:
        return None
    return client.delete(resource_group_name, snapshot_name)
def aks_nodepool_snapshot_list(cmd, client, resource_group_name=None):   # pylint: disable=unused-argument
    """List nodepool snapshots, subscription-wide when no resource group is given."""
    if resource_group_name in (None, ''):
        return client.list()
    return client.list_by_resource_group(resource_group_name)
def _get_kubelet_config(file_path):
    """Load a kubelet configuration dict from a JSON file.

    Raises InvalidArgumentValueError when the path is not a readable file
    or its contents are not a JSON object.
    """
    if not os.path.isfile(file_path):
        # Fixed message grammar/typo ("is not valid file, or not accessable").
        raise InvalidArgumentValueError("{} is not a valid file, or not accessible.".format(file_path))
    kubelet_config = get_file_json(file_path)
    if not isinstance(kubelet_config, dict):
        msg = "Error reading kubelet configuration at {}. Please see https://aka.ms/CustomNodeConfig for proper format."
        raise InvalidArgumentValueError(msg.format(file_path))
    return kubelet_config
def _get_linux_os_config(file_path):
    """Load a Linux OS configuration dict from a JSON file.

    Raises InvalidArgumentValueError when the path is not a readable file
    or its contents are not a JSON object.
    """
    if not os.path.isfile(file_path):
        # Fixed message grammar/typo ("is not valid file, or not accessable").
        raise InvalidArgumentValueError("{} is not a valid file, or not accessible.".format(file_path))
    os_config = get_file_json(file_path)
    if not isinstance(os_config, dict):
        # Implicit string concatenation replaces the old backslash continuation,
        # which embedded the source indentation whitespace inside the message.
        msg = ("Error reading Linux OS configuration at {}. "
               "Please see https://aka.ms/CustomNodeConfig for proper format.")
        raise InvalidArgumentValueError(msg.format(file_path))
    return os_config
|
discordgenerator.py | import undetected_chromedriver as uc
uc.install()
import os
import time
import requests
import random
import string
import sys
import threading
import datetime
import re
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from colorama import Fore, Style, init
from bs4 import BeautifulSoup as soup
from sys import stdout
from src import UI
from src import GmailnatorRead, GmailnatorGet, dfilter_email, pfilter_email, find_email_type
init(convert=True)
lock = threading.Lock()
def password_gen(length=8, chars= string.ascii_letters + string.digits + string.punctuation):
    """Return a random password of *length* characters drawn from *chars*."""
    picks = [random.choice(chars) for _ in range(length)]
    return ''.join(picks)
# def minute_timer():
# while True:
# elapsed = time.strftime('%H:%M:%S', time.gmtime(time.time() - start))
# os.system(f'title Discord Generator ^| Rate Limit Timer ^| Time Elapsed {elapsed}')
# time.sleep(0.05)
# if elapsed == '00:01:00':
# print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Timer finished.")
# break
def gather_proxy():
    """Read proxies from config/proxies.txt, one per line.

    Lines are stripped of surrounding whitespace and blank lines are skipped,
    so a trailing newline in the file can no longer produce an empty proxy
    entry (which would break the workers that random.choice() it).
    """
    with open('config/proxies.txt', 'r', encoding='UTF-8') as file:
        return [line.strip() for line in file if line.strip()]
def free_print(arg):
    """Print *arg* while holding the module lock so threads don't interleave output.

    Uses ``with lock:`` so the lock is released even if flushing or printing
    raises — the previous manual acquire()/release() pair would leave the lock
    held forever on an exception, deadlocking every other thread.
    """
    with lock:
        stdout.flush()
        print(arg)
class DiscordGen:
    """Drive a Chrome webdriver through Discord's registration and email-verify flow."""
    def __init__(self, email, username, password, proxy=None):
        # Suppress noisy chromedriver logging; optionally route through a proxy.
        options = webdriver.ChromeOptions()
        options.add_experimental_option("excludeSwitches", ["enable-logging"])
        if proxy:
            options.add_argument('--proxy-server=%s' % proxy)
        self.driver = webdriver.Chrome(options=options, executable_path=r"chromedriver")
        self.email= email
        self.username = username
        self.password = password
    def register(self):
        """Fill in the Discord sign-up form, submit it and capture the account token.

        Interactive: waits for the user to solve the captcha. On 'y' the token
        is scraped from localStorage into ``self.token``.
        """
        self.driver.get('https://discord.com/register')
        free_print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Webdriver wait")
        WebDriverWait(self.driver, 1).until(EC.presence_of_element_located((By.XPATH, "//input[@type='email']")))
        free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} " +self.email)
        self.driver.find_element_by_xpath("//input[@type='email']").send_keys(self.email)
        free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} " +self.username)
        self.driver.find_element_by_xpath("//input[@type='text']").send_keys(self.username)
        free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} " +self.password)
        self.driver.find_element_by_xpath("//input[@type='password']").send_keys(self.password)
        free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL}" +' Random Date')
        # NOTE(review): dateWorking is never set True, so the fallback branch below is dead code.
        dateWorking = False
        #sometimes different discord languages have different xpath locations
        try: #if date could not be found via divs
            actions = ActionChains(self.driver)
            time.sleep(.5)
            # Locating to the first date input then the discord will navigate the focuse to the next input
            self.driver.find_elements_by_class_name('css-1hwfws3')[0].click()
            actions.send_keys(str(random.randint(1,12))) # Submitting the month
            actions.send_keys(Keys.ENTER)
            actions.send_keys(str(random.randint(1,28))) # Submitting the day
            actions.send_keys(Keys.ENTER)
            actions.send_keys(str(random.randint(1990,2001))) # Submitting the year
            actions.send_keys(Keys.ENTER)
            actions.send_keys(Keys.TAB) # Navigating to continue button
            actions.send_keys(Keys.ENTER) # Creates the account
            actions.perform() # All the actions are pending and needs to perform all at once
        # NOTE(review): bare except silently swallows all errors and falls back to manual input.
        except:
            free_print(f"\n{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} " + 'Error in typing date. Please type the date manually.')
            input(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Submit your form manually. Have you put the date? [y/n] > ") # Fixed typo
            free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Submit form')
        if dateWorking:
            actions = ActionChains(self.driver)
            actions.send_keys(str(random.randint(1,12)))# Month
            actions.send_keys(Keys.ENTER)
            actions.send_keys(str(random.randint(1,28))) #Day
            actions.send_keys(Keys.ENTER)
            random_year = [1989,1990,1991,1992,1993,1994,1995,1996,1997,1998,1999,2000]
            actions.send_keys(str(random.choice(random_year))) #Year
            actions.perform()
        #Submit form
        try:
            self.driver.find_element_by_class_name('inputDefault-3JxKJ2').click() # Agree to terms and conditions
        except:
            free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Could not find button. Ignoring..")
            pass
        #input(f'{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Press ENTER to create account.')
        self.driver.find_element_by_class_name('button-3k0cO7').click() # Submit button
        free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Submit form')
        while True:
            # Lock keeps the interactive prompt from interleaving with other threads' output.
            lock.acquire()
            checker = input(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Have you solved the captcha and submit? [y/n] > ")
            lock.release()
            if checker == "y":
                # Scrape the auth token out of localStorage via a throwaway popup window.
                self.token = self.driver.execute_script("let popup; popup = window.open('', '', `width=1,height=1`); if(!popup || !popup.document || !popup.document.write) console.log('Please allow popups'); window.dispatchEvent(new Event('beforeunload')); token = popup.localStorage.token.slice(1, -1); popup.close(); return token")
                break
                # NOTE(review): unreachable — the 'break' above exits the loop first,
                # so register() actually returns None on success, not True.
                return True
            elif checker =="n":
                sys.exit()
                # NOTE(review): unreachable — sys.exit() raises SystemExit before this line.
                return False
    def verify_account(self,link):
        """Open the email-verification *link* in the driver."""
        self.driver.get(link)
        free_print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Task complete")
    def close_driver(self):
        """Close the current browser window."""
        self.driver.close()
def start_verify(email, email_type): #email, 'dot'/'plus'
    """Poll the Gmailnator inbox for a Discord verification email and return its link.

    Retries up to 6 times with a 15 second pause between attempts. Returns the
    extracted verification URL, None when the Discord email had no usable link,
    or False when no Discord email arrives within the retry budget.
    """
    free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Checking email inbox.')
    raw_email = email
    if email_type == 'dot':
        email = dfilter_email(raw_email)
    if email_type == 'plus':
        email = pfilter_email(raw_email)
    g = GmailnatorRead(email, raw_email, email_type)
    retry_count = 1
    while retry_count <= 6:
        gmailnator_inbox = g.get_inbox()
        for inbox_message in gmailnator_inbox: # for each email
            discord_keywords = re.findall('Discord', inbox_message)
            if 'Discord' in discord_keywords:
                #retrive messages from inbox
                bs = soup(inbox_message, 'html.parser')
                href_links = [a['href'] for a in bs.find_all('a')]
                first_message = href_links[0] #get first message which is most likely from Discord verify.
                remove = re.compile('(^.*?(?=[#])[#])') #only get id; remove unnecessary stuff
                first_id = remove.sub('', first_message)
                message_html = g.get_single_message(first_id)
                content_html = soup(message_html, 'html.parser')
                message_links = [a['href'] for a in content_html.find_all('a')]
                try:
                    discord_verify = message_links[1]
                    free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Extracted discord link.')
                except IndexError:
                    free_print(f'{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} List index out of range.')
                    discord_verify = None
                return discord_verify
            else:
                free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Discord keyword not found in that email. Trying an other one...')
        free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Inbox empty. Retry count: {retry_count}')
        free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Sleeping for 15 seconds. Waiting for Discord email.')
        time.sleep(15)
        # BUG FIX: retry_count was never incremented, so the "retry 6 times"
        # loop could never terminate unless a Discord email arrived.
        retry_count += 1
    free_print(f'{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Discord keyword not found. Unable to verify account via email.')
    return False # cant find any email with the word discord in it
def worker(proxy=None):
    """End-to-end account generation task: scrape an email, register, verify, save the token."""
    if proxy:
        free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Proxy used {proxy} ")
    free_print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Scraping email. ")
    g = GmailnatorGet()
    new_email = g.get_email()
    free_print(f"{Fore.LIGHTMAGENTA_EX}[*]{Style.RESET_ALL} Scraped {new_email}")
    email_type = find_email_type(new_email)
    # NOTE(review): filtered_email is computed but never used below —
    # start_verify() re-derives it from new_email; TODO confirm dead code.
    if email_type =='dot':
        filtered_email = dfilter_email(new_email)
    if email_type == 'plus':
        filtered_email = pfilter_email(new_email)
    # Pick a random display name from the configured username pool.
    discord_usernames = []
    with open('config/discord_usernames.txt', 'r', encoding='UTF-8') as username_txt:
        lines = username_txt.readlines()
        for line in lines:
            discord_usernames.append(line.replace('\n', ''))
    username = random.choice(discord_usernames)
    password = password_gen()
    if not proxy:
        d = DiscordGen(new_email, username, password)
    if proxy:
        d = DiscordGen(new_email, username, password, proxy = proxy)
    try:
        d.register()
        token = str(d.token)
        # NOTE(review): lock.release() is skipped if the file write raises —
        # consider 'with lock:' to guarantee release.
        lock.acquire()
        with open('output/login.txt', 'a', encoding='UTF-8') as login_file:
            login_file.write(new_email + ':' + password + ':' + token + '\n')
        lock.release()
        try:
            verify_link = start_verify(new_email, email_type)
            if verify_link:
                d.verify_account(verify_link)
                os.system('pause>nul')
                d.close_driver()
            else:
                # Fall back to opening the inbox so the user can verify manually.
                d.verify_account('https://www.gmailnator.com/inbox/#' + new_email)
                os.system('pause>nul')
        except Exception as e:
            print('some error occured')
            print(e)
            d.verify_account('https://www.gmailnator.com/inbox/#' + new_email)
            os.system('pause>nul')
            d.close_driver()
    except WebDriverException:
        free_print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Webdriver Error. Unable to continue.")
    free_print(f"{Fore.LIGHTMAGENTA_EX}[!]{Style.RESET_ALL} Worker task ended.")
def menu():
    """Show the start menus and return the selected mode.

    Returns 2 for single-worker mode, 1 for threaded mode, or None when the
    user picks anything else or enters invalid input.
    """
    proxies = gather_proxy()
    os.system('cls')
    if proxies:
        os.system('title Discord Generator ^| coded by Christallinqq ^| PROXY LIST DETECTED')
    else:
        os.system('title Discord Generator ^| coded by Christallinqq ')
    UI.banner()
    UI.start_menu()

    def _read_choice():
        # Prompt for a menu number; non-integer input maps to 0 (i.e. "no choice").
        try:
            choice = int(input(f"\t\t{Fore.LIGHTMAGENTA_EX}[?]{Style.RESET_ALL} > "))
            print('\n\n')
            return choice
        except ValueError:
            return 0

    if _read_choice() != 1:
        return None
    os.system('cls')
    UI.banner()
    UI.menu2()
    second_choice = _read_choice()
    if second_choice == 1:
        return 2
    if second_choice == 2:
        return 1
    return None
def main():
    """Entry point: read the menu selection, then run workers threaded or single-shot."""
    continue_program = True
    m = menu()
    # NOTE(review): user_thread stays unbound when menu() returns None, but the
    # continue_program guard below prevents it from ever being read in that case.
    if m == 1:
        user_thread= True
    elif m == 2:
        user_thread = False
    else:
        continue_program = False
    if continue_program:
        if user_thread:
            print(f"{Fore.LIGHTMAGENTA_EX}[WARNING]{Style.RESET_ALL} Do not put a lot of threads or you will crash. 2 threads is decent. (chrome windows)")
            num_thread = int(input(f"{Fore.LIGHTMAGENTA_EX}[>]{Style.RESET_ALL} Enter number of threads [eg. 3] > "))
        proxies = gather_proxy()
        os.system('cls')
        UI.banner()
        print('\n\n')
        if user_thread:
            threads = []
            if len(proxies) != 0:
                os.system('title Discord Generator ^| Proxy: True ^| Threading: True')
                for i in range(num_thread):
                    # Each worker gets a randomly chosen proxy from the pool.
                    t = threading.Thread(target=worker, args= (random.choice(proxies), ))
                    threads.append(t)
                    t.start()
            else:
                os.system('title Discord Generator ^| Proxy: False ^| Threading: True')
                for i in range(num_thread):
                    t = threading.Thread(target=worker)
                    threads.append(t)
                    t.start()
        else:
            # Single-shot mode: run one worker inline, with a proxy when available.
            if len(proxies) != 0:
                os.system('title Discord Generator ^| Proxy: True ^| Threading: False')
                worker(random.choice(proxies))
            else:
                os.system('title Discord Generator ^| Proxy: False ^| Threading: False')
                worker()
if __name__ == '__main__':
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.